diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 9de9813..1462492 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -51,14 +54,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -165,14 +185,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -188,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -197,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -206,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -216,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -227,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -238,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -252,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index a311db8..415b28c 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
 === 4 Host Program support
 --- 4.1 Simple Host Program
 --- 4.2 Composite Host Programs
- --- 4.3 Using C++ for host programs
- --- 4.4 Controlling compiler options for host programs
- --- 4.5 When host programs are actually built
- --- 4.6 Using hostprogs-$(CONFIG_FOO)
+ --- 4.3 Defining shared libraries
+ --- 4.4 Using C++ for host programs
+ --- 4.5 Controlling compiler options for host programs
+ --- 4.6 When host programs are actually built
+ --- 4.7 Using hostprogs-$(CONFIG_FOO)

 === 5 Kbuild clean infrastructure

@@ -642,7 +643,29 @@ Both possibilities are described in the following.
 Finally, the two .o files are linked to the executable, lxdialog.
 Note: The syntax <executable>-y is not permitted for host-programs.

---- 4.3 Using C++ for host programs
+--- 4.3 Defining shared libraries
+
+ Objects with extension .so are considered shared libraries, and
+ will be compiled as position independent objects.
+ Kbuild provides support for shared libraries, but the usage
+ shall be restricted.
+ In the following example the libkconfig.so shared library is used
+ to link the executable conf.
+
+ Example:
+ #scripts/kconfig/Makefile
+ hostprogs-y := conf
+ conf-objs := conf.o libkconfig.so
+ libkconfig-objs := expr.o type.o
+
+ Shared libraries always require a corresponding -objs line, and
+ in the example above the shared library libkconfig is composed by
+ the two objects expr.o and type.o.
+ expr.o and type.o will be built as position independent code and
+ linked as a shared library libkconfig.so. C++ is not supported for
+ shared libraries.
+
+--- 4.4 Using C++ for host programs

 kbuild offers support for host programs written in C++. This was
 introduced solely to support kconfig, and is not recommended
@@ -665,7 +688,7 @@ Both possibilities are described in the following.
 qconf-cxxobjs := qconf.o
 qconf-objs := check.o

---- 4.4 Controlling compiler options for host programs
+--- 4.5 Controlling compiler options for host programs

 When compiling host programs, it is possible to set specific flags.
 The programs will always be compiled utilising $(HOSTCC) passed
@@ -693,7 +716,7 @@ Both possibilities are described in the following.
 When linking qconf, it will be passed the extra option
 "-L$(QTDIR)/lib".

---- 4.5 When host programs are actually built
+--- 4.6 When host programs are actually built

 Kbuild will only build host-programs when they are referenced
 as a prerequisite.
@@ -724,7 +747,7 @@ Both possibilities are described in the following.
 This will tell kbuild to build lxdialog even if not referenced in
 any rule.

---- 4.6 Using hostprogs-$(CONFIG_FOO)
+--- 4.7 Using hostprogs-$(CONFIG_FOO)

 A typical pattern in a Kbuild file looks like this:

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 176d4fe..6eabd3c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1191,6 +1191,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+ grsec_sysfs_restrict= Format: 0 | 1
+ Default: 1
+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2283,6 +2290,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2584,6 +2595,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ Format: { 0 | 1 | off | fast | full }
+ Options '0' and '1' are only provided for backward
+ compatibility, 'off' or 'fast' should be used instead.
+ 0|off : disable slab object sanitization
+ 1|fast: enable slab object sanitization excluding
+ whitelisted slabs (default)
+ full : sanitize all slabs, even the whitelisted ones
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
diff --git a/Makefile b/Makefile
index 713bf26..9ceae96 100644
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 HOSTCC = gcc
 HOSTCXX = g++
 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds

 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -622,6 +624,72 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
 else
 KBUILD_CFLAGS += -g
 endif
-KBUILD_AFLAGS += -Wa,-gdwarf-2
+KBUILD_AFLAGS += -Wa,--gdwarf-2
 endif
 ifdef CONFIG_DEBUG_INFO_DWARF4
 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
@@ -879,7 +947,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -926,6 +994,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -1095,6 +1168,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1215,7 +1293,7 @@ distclean: mrproper
 @find $(srctree) $(RCS_FIND_IGNORE) \
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
 -type f -print | xargs rm -f


@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1521,17 +1601,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1543,11 +1627,15 @@ endif
 $(build)=$(build-dir)
 # Make sure the latest headers are built for Documentation
 Documentation/: headers_install
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..3405f46 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e51f578..16c64a3 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 9d0ac09..479a962 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97d07ed..2931f2b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..eaa807d 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#ifdef CONFIG_THUMB2_KERNEL
+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
+#else
+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
+#endif
+
+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) ACCESS_ONCE((v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return ACCESS_ONCE(v->counter);
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -38,26 +62,50 @@
 * to ensure that the update happens.
 */

-#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __OVERFLOW_POST \
+ " bvc 3f\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+#define __OVERFLOW_POST_RETURN \
+ " bvc 3f\n" \
+" mov %0, %1\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+#define __OVERFLOW_EXTABLE \
+ "4:\n" \
+ _ASM_EXTABLE(2b, 4b)
+#else
+#define __OVERFLOW_POST
+#define __OVERFLOW_POST_RETURN
+#define __OVERFLOW_EXTABLE
+#endif
+
+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
 { \
 unsigned long tmp; \
 int result; \
 \
 prefetchw(&v->counter); \
- __asm__ __volatile__("@ atomic_" #op "\n" \
+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
 "1: ldrex %0, [%3]\n" \
 " " #asm_op " %0, %0, %4\n" \
+ post_op \
 " strex %1, %0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "Ir" (i) \
 : "cc"); \
 } \

-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 { \
 unsigned long tmp; \
 int result; \
@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 smp_mb(); \
 prefetchw(&v->counter); \
 \
- __asm__ __volatile__("@ atomic_" #op "_return\n" \
+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
 "1: ldrex %0, [%3]\n" \
 " " #asm_op " %0, %0, %4\n" \
+ post_op \
 " strex %1, %0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "Ir" (i) \
 : "cc"); \
@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 return result; \
 }

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
 int oldval;
@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 __asm__ __volatile__ ("@ atomic_add_unless\n"
 "1: ldrex %0, [%4]\n"
 " teq %0, %5\n"
-" beq 2f\n"
-" add %1, %0, %6\n"
+" beq 4f\n"
+" adds %1, %0, %6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " strex %2, %1, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
 #error SMP not supported on pre-ARMv6 CPUs
 #endif

-#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
 { \
 unsigned long flags; \
 \
@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
 raw_local_irq_restore(flags); \
 } \

-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 { \
 unsigned long flags; \
 int val; \
@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 return val; \
 }

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 int ret;
@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg((atomic_t *)v, old, new);
+}
+
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 int c, old;
@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)

 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -216,6 +336,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 long long tmp;
@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ long long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
 #endif

-#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
+#undef __OVERFLOW_POST_RETURN
+#define __OVERFLOW_POST_RETURN \
+ " bvc 3f\n" \
+" mov %0, %1\n" \
+" mov %H0, %H1\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+
+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
 { \
 long long result; \
 unsigned long tmp; \
 \
 prefetchw(&v->counter); \
- __asm__ __volatile__("@ atomic64_" #op "\n" \
+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
 "1: ldrexd %0, %H0, [%3]\n" \
 " " #op1 " %Q0, %Q0, %Q4\n" \
 " " #op2 " %R0, %R0, %R4\n" \
+ post_op \
 " strexd %1, %0, %H0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "r" (i) \
 : "cc"); \
 } \

-#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
 { \
 long long result; \
 unsigned long tmp; \
@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 smp_mb(); \
 prefetchw(&v->counter); \
 \
- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
 "1: ldrexd %0, %H0, [%3]\n" \
 " " #op1 " %Q0, %Q0, %Q4\n" \
 " " #op2 " %R0, %R0, %R4\n" \
+ post_op \
 " strexd %1, %0, %H0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "r" (i) \
 : "cc"); \
@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 return result; \
 }

+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 #define ATOMIC64_OPS(op, op1, op2) \
 ATOMIC64_OP(op, op1, op2) \
 ATOMIC64_OP_RETURN(op, op1, op2)
@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)

 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_EXTABLE
+#undef __OVERFLOW_POST_RETURN
+#undef __OVERFLOW_POST

 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 long long new)
@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 return oldval;
 }

+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 long long result;
@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 long long result;
- unsigned long tmp;
+ u64 tmp;

 smp_mb();
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %Q0, %Q1, #1\n"
+" sbcs %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " teq %R0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adcs %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index d2f81e6..3c4dba5 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -67,7 +67,7 @@
 do { \
 compiletime_assert_atomic_type(*p); \
 smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ ACCESS_ONCE_RW(*p) = (v); \
 } while (0)

 #define smp_load_acquire(p) \
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 2d46862..a35415b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 5233151..87a71fa 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index abb2c37..96db950 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1a 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index afb9caf..9a0bac0 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 891a56b..48f337e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -36,7 +36,7 @@ struct outer_cache_fns {
1842
1843 /* This is an ARM L2C thing */
1844 void (*write_sec)(unsigned long, unsigned);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
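
On classic (non-LPAE) ARM a Linux pmd entry is really a pair of 1 MiB hardware section descriptors covering 2 MiB, which is why __section_update() above picks pmdp[0] or pmdp[1] by bit 20 of the address. A standalone restatement of that selection:

#include <stdio.h>

#define SECTION_SHIFT 20                 /* 1 MiB sections on classic ARM */
#define SECTION_SIZE  (1UL << SECTION_SHIFT)

static unsigned which_pmd_slot(unsigned long addr)
{
    return (addr & SECTION_SIZE) ? 1 : 0;   /* mirrors the #else branch above */
}

int main(void)
{
    unsigned long addrs[] = { 0xc0000000UL, 0xc0100000UL, 0xc0200000UL };
    for (int i = 0; i < 3; i++)
        printf("%#lx -> pmdp[%u]\n", addrs[i], which_pmd_slot(addrs[i]));
    return 0;
}
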
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index f027941..f36ce30 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -126,6 +126,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a31ecdad..95e98d4 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -81,6 +81,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -92,10 +93,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index d5cac54..906ea3e 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be a 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
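
pax_open_kernel()/pax_close_kernel() are meant to bracket the shortest possible write to otherwise read-only kernel memory; later hunks (fiq.c, tcm.c, patch.c) use them exactly that way around a memcpy() or store. A userspace model of the pairing discipline the BUG_ON()s enforce (illustrative only; 1 and 3 stand in for the client and manager DACR encodings):

#include <assert.h>
#include <stdio.h>

static unsigned kernel_domain = 1;       /* 1 = client, 3 = manager (model) */

static void open_kernel(void)  { assert(kernel_domain == 1); kernel_domain = 3; }
static void close_kernel(void) { assert(kernel_domain == 3); kernel_domain = 1; }

int main(void)
{
    unsigned fake_text = 0xe1a00000;     /* pretend kernel-text word */

    open_kernel();                       /* writes permitted from here... */
    fake_text = 0xe12fff1e;
    close_kernel();                      /* ...to here, and never nested */

    printf("patched word %#x, domain back to %u\n", fake_text, kernel_domain);
    return 0;
}
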
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index d890e41..3921292 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -78,9 +78,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 .restart_block = { \
2125 .fn = do_no_restart_syscall, \
2126 }, \
2127@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* within 8 bits of TIF_SYSCALL_TRACE
2133+ * to meet flexible second operand requirements
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
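
The "flexible second operand" constraint mentioned in the new comment: an ARM immediate operand is an 8-bit value rotated right by an even amount, so all _TIF_SYSCALL_WORK bits must sit inside one such window for entry-common.S to test them with a single instruction. A standalone check of the layout chosen above:

#include <stdio.h>

static int fits_arm_imm(unsigned v)
{
    for (unsigned rot = 0; rot < 32; rot += 2) {
        /* rotate left by rot undoes an encoding of "imm8 ror rot" */
        unsigned r = rot ? ((v << rot) | (v >> (32 - rot))) : v;
        if (r <= 0xff)
            return 1;
    }
    return 0;
}

int main(void)
{
    /* bits 8..12: TRACE (8, defined earlier in the header), AUDIT,
       TRACEPOINT, SECCOMP, GRSEC_SETXID as laid out in the hunk */
    unsigned work = 0x1f00;
    printf("_TIF_SYSCALL_WORK %#x encodable: %s\n",
           work, fits_arm_imm(work) ? "yes" : "no");
    return 0;
}
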
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index 4767eb9..bf00668 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a,b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x,p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x,p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check(x,p); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x,p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x,p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check(x,p); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2260 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x,ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x),(ptr),__gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x,ptr,err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x),(ptr),err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x,ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x),(ptr),__pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x,ptr,err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x),(ptr),err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
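
The added (long)n < 0 test in copy_from_user()/copy_to_user() rejects sizes that underflowed to huge unsigned values before they reach access_ok() or the copy loop. A minimal model of the failure mode it is aimed at:

#include <stdio.h>

static unsigned long copy_from_user_model(unsigned long n)
{
    if ((long)n < 0)
        return n;            /* refuse: report every byte as uncopied */
    return 0;                /* pretend the copy succeeded */
}

int main(void)
{
    unsigned long payload = 16, header = 32;
    unsigned long n = payload - header;      /* classic underflow: huge value */

    printf("n = %lu, uncopied = %lu\n", n, copy_from_user_model(n));
    return 0;
}
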
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
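
The PSR_ISET_MASK change is a genuine fix: the ARM ISA-state bits are J at bit 24 and T at bit 5, so the old value 0x01000010 tested bit 4 (part of the mode field) instead of the Thumb bit. A quick check:

#include <stdio.h>

#define PSR_J_BIT 0x01000000u   /* Jazelle state */
#define PSR_T_BIT 0x00000020u   /* Thumb state */

int main(void)
{
    printf("ISA state mask = %#010x\n", PSR_J_BIT | PSR_T_BIT);  /* 0x01000020 */
    return 0;
}
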
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 2f5555d..d493c91 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -47,6 +47,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -89,11 +170,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -478,7 +575,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -512,11 +611,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
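
The repeated "assume 8K pages" sequence in these macros recovers current_thread_info() by masking sp down to the base of the 8 KiB kernel stack; the mask is split across two bic instructions because an ARM immediate is limited to 8 bits with an even rotation. A standalone check that the two constants compose to THREAD_SIZE - 1:

#include <stdio.h>

#define THREAD_SIZE 8192UL   /* 8 KiB kernel stacks, as the macros assume */

int main(void)
{
    unsigned long sp = 0xc7a13e54UL;               /* arbitrary kernel sp */
    unsigned long ti = sp & ~0x1fc0UL & ~0x3fUL;   /* the two bic steps */

    printf("sp = %#lx -> thread_info = %#lx\n", sp, ti);
    printf("0x1fc0 | 0x3f == THREAD_SIZE - 1: %s\n",
           (0x1fc0UL | 0x3fUL) == THREAD_SIZE - 1 ? "yes" : "no");
    return 0;
}
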
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid a performance hit of wrapping the code above
2663+ * that directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 664eee8..f470938 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -437,7 +437,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index bea7db9..a210d10 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
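
Under KERNEXEC the default module_alloc() now returns non-executable PAGE_KERNEL memory, while module_alloc_exec() supplies the executable mappings, so module code and data never share a writable-and-executable region. A loose userspace model of the policy (the RW/RX labels simplify the real pgprot values, so treat this as an approximation):

#include <stdio.h>

enum prot { PROT_RW, PROT_RX };

static enum prot module_alloc_model(int for_code, int kernexec)
{
    if (!kernexec)
        return PROT_RX;              /* legacy behaviour: one executable pool */
    return for_code ? PROT_RX : PROT_RW;
}

int main(void)
{
    printf("KERNEXEC data pages: %s\n",
           module_alloc_model(0, 1) == PROT_RW ? "RW" : "RX");
    printf("KERNEXEC code pages: %s\n",
           module_alloc_model(1, 1) == PROT_RX ? "RX" : "RW");
    return 0;
}
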
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 5038960..4aa71d8 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index fdfa3a7..5d208b8 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -207,6 +207,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -220,7 +221,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
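
The rewritten arch_setup_additional_pages() no longer installs a sigpage VMA at all; it just records a word-aligned pseudo-random value in [PAGE_OFFSET, PAGE_OFFSET + 0x3FFEFFE0). A sketch of the resulting range under the common 3G/1G split (the PAGE_OFFSET value is an assumption, not taken from this hunk):

#include <stdio.h>

int main(void)
{
    unsigned long page_offset = 0xc0000000UL;            /* 3G/1G split assumed */
    unsigned long lo = page_offset & 0xFFFFFFFCUL;
    unsigned long hi = (page_offset + 0x3FFEFFE0UL - 1) & 0xFFFFFFFCUL;

    printf("sigpage drawn from [%#lx, %#lx], word aligned\n", lo, hi);
    return 0;
}
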
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f73891b..cf3004e 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -28,7 +28,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index e55408e..14d9998 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3058 asm("mrc p15, 0, %0, c0, c1, 4"
3059 : "=r" (mmfr0));
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
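
The new probe enables PXN only when ID_MMFR0's VMSA field (the low nibble) reads 4 or 5, i.e. a VMSAv7 implementation that defines the PXN bits. A standalone restatement of the test:

#include <stdio.h>

static int vmsa_has_pxn(unsigned mmfr0)
{
    unsigned vmsa = mmfr0 & 0x0000000f;
    return vmsa == 0x4 || vmsa == 0x5;   /* exactly the hunk's condition */
}

int main(void)
{
    unsigned samples[] = { 0x00000003, 0x00000004, 0x00000005 };
    for (int i = 0; i < 3; i++)
        printf("MMFR0 %#010x -> PXN %s\n", samples[i],
               vmsa_has_pxn(samples[i]) ? "usable" : "absent");
    return 0;
}
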
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 8aa6f1b..0899e08 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index 0b0d58a..988cb45 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
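
kvm_vmid_gen is a monotonically growing generation counter, not a reference count, so it is switched to the _unchecked atomic flavour that opts out of PAX_REFCOUNT's overflow detection. A loose model of the distinction (how PAX_REFCOUNT actually reacts to an overflow is architecture-specific; exit() below merely stands in for it):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static long inc_checked(long v)          /* models a PAX_REFCOUNT atomic */
{
    if (v == LONG_MAX) {
        fprintf(stderr, "refcount overflow trapped\n");
        exit(1);
    }
    return v + 1;
}

static long inc_unchecked(long v)        /* models atomic64_inc_unchecked */
{
    return v + 1;                        /* plain wrap-around semantics */
}

int main(void)
{
    printf("checked:   %ld\n", inc_checked(41));
    printf("unchecked: %ld\n", inc_unchecked(41));
    return 0;
}
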
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rationale for this in __copy_to_user() above. */
3436 if (n < 64)
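
The extra leading underscore added throughout these hunks is the hook for PAX_MEMORY_UDEREF: the raw copy routines are demoted to ___-prefixed internals so that checking wrappers can interpose on the old __-prefixed names. A minimal sketch of the assumed wrapper shape (the real one lives in arch/arm/include/asm/uaccess.h elsewhere in this patch; the helper name below is illustrative):

    static inline unsigned long
    copy_to_user_wrapped(void __user *to, const void *from, unsigned long n)
    {
            if (access_ok(VERIFY_WRITE, to, n)) {
                    pax_open_userland();              /* open the userland domain */
                    n = ___copy_to_user(to, from, n); /* raw, unchecked copy */
                    pax_close_userland();             /* close it again */
            }
            return n;                                 /* bytes NOT copied */
    }
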
3437diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3438index ce25e85..3dd7850 100644
3439--- a/arch/arm/mach-at91/setup.c
3440+++ b/arch/arm/mach-at91/setup.c
3441@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3442
3443 desc->pfn = __phys_to_pfn(base);
3444 desc->length = length;
3445- desc->type = MT_MEMORY_RWX_NONCACHED;
3446+ desc->type = MT_MEMORY_RW_NONCACHED;
3447
3448 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3449 base, length, desc->virtual);
3450diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3451index f8e7dcd..17ee921 100644
3452--- a/arch/arm/mach-exynos/suspend.c
3453+++ b/arch/arm/mach-exynos/suspend.c
3454@@ -18,6 +18,7 @@
3455 #include <linux/syscore_ops.h>
3456 #include <linux/cpu_pm.h>
3457 #include <linux/io.h>
3458+#include <linux/irq.h>
3459 #include <linux/irqchip/arm-gic.h>
3460 #include <linux/err.h>
3461 #include <linux/regulator/machine.h>
3462@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3463 tmp |= pm_data->wake_disable_mask;
3464 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3465
3466- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_open_kernel();
3469+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3470+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3471+ pax_close_kernel();
3472
3473 register_syscore_ops(&exynos_pm_syscore_ops);
3474 suspend_set_ops(&exynos_suspend_ops);
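
This is the canonical KERNEXEC write bracket that recurs through the rest of the patch: structures full of function pointers are moved into read-only memory, so the rare legitimate runtime assignment must temporarily lift write protection, and the (void **) cast is what silences the const that the constify plugin has imposed. In general form (a sketch, not a quote from the patch):

    pax_open_kernel();                          /* per-CPU: make rodata writable */
    *(void **)&some_ops.handler = new_handler;  /* cast away plugin-imposed const */
    pax_close_kernel();                         /* restore the read-only mapping */
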
3475diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3476index 7f352de..6dc0929 100644
3477--- a/arch/arm/mach-keystone/keystone.c
3478+++ b/arch/arm/mach-keystone/keystone.c
3479@@ -27,7 +27,7 @@
3480
3481 #include "keystone.h"
3482
3483-static struct notifier_block platform_nb;
3484+static notifier_block_no_const platform_nb;
3485 static unsigned long keystone_dma_pfn_offset __read_mostly;
3486
3487 static int keystone_platform_notifier(struct notifier_block *nb,
3488diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3489index ccef880..5dfad80 100644
3490--- a/arch/arm/mach-mvebu/coherency.c
3491+++ b/arch/arm/mach-mvebu/coherency.c
3492@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3493
3494 /*
3495 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3496- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3497+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3498 * is needed as a workaround for a deadlock issue between the PCIe
3499 * interface and the cache controller.
3500 */
3501@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3502 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3503
3504 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3505- mtype = MT_UNCACHED;
3506+ mtype = MT_UNCACHED_RW;
3507
3508 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3509 }
3510diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3511index b6443a4..20a0b74 100644
3512--- a/arch/arm/mach-omap2/board-n8x0.c
3513+++ b/arch/arm/mach-omap2/board-n8x0.c
3514@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3515 }
3516 #endif
3517
3518-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3519+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3520 .late_init = n8x0_menelaus_late_init,
3521 };
3522
3523diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524index 79f49d9..70bf184 100644
3525--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3526+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3527@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3528 void (*resume)(void);
3529 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3530 void (*hotplug_restart)(void);
3531-};
3532+} __no_const;
3533
3534 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3535 static struct powerdomain *mpuss_pd;
3536@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3537 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3538 {}
3539
3540-struct cpu_pm_ops omap_pm_ops = {
3541+static struct cpu_pm_ops omap_pm_ops __read_only = {
3542 .finish_suspend = default_finish_suspend,
3543 .resume = dummy_cpu_resume,
3544 .scu_prepare = dummy_scu_prepare,
3545diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3546index 5305ec7..6d74045 100644
3547--- a/arch/arm/mach-omap2/omap-smp.c
3548+++ b/arch/arm/mach-omap2/omap-smp.c
3549@@ -19,6 +19,7 @@
3550 #include <linux/device.h>
3551 #include <linux/smp.h>
3552 #include <linux/io.h>
3553+#include <linux/irq.h>
3554 #include <linux/irqchip/arm-gic.h>
3555
3556 #include <asm/smp_scu.h>
3557diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3558index f961c46..4a453dc 100644
3559--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3560+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3561@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3562 return NOTIFY_OK;
3563 }
3564
3565-static struct notifier_block __refdata irq_hotplug_notifier = {
3566+static struct notifier_block irq_hotplug_notifier = {
3567 .notifier_call = irq_cpu_hotplug_notify,
3568 };
3569
3570diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3571index be9541e..821805f 100644
3572--- a/arch/arm/mach-omap2/omap_device.c
3573+++ b/arch/arm/mach-omap2/omap_device.c
3574@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3575 struct platform_device __init *omap_device_build(const char *pdev_name,
3576 int pdev_id,
3577 struct omap_hwmod *oh,
3578- void *pdata, int pdata_len)
3579+ const void *pdata, int pdata_len)
3580 {
3581 struct omap_hwmod *ohs[] = { oh };
3582
3583@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3584 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3585 int pdev_id,
3586 struct omap_hwmod **ohs,
3587- int oh_cnt, void *pdata,
3588+ int oh_cnt, const void *pdata,
3589 int pdata_len)
3590 {
3591 int ret = -ENOMEM;
3592diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3593index 78c02b3..c94109a 100644
3594--- a/arch/arm/mach-omap2/omap_device.h
3595+++ b/arch/arm/mach-omap2/omap_device.h
3596@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3597 /* Core code interface */
3598
3599 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3600- struct omap_hwmod *oh, void *pdata,
3601+ struct omap_hwmod *oh, const void *pdata,
3602 int pdata_len);
3603
3604 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3605 struct omap_hwmod **oh, int oh_cnt,
3606- void *pdata, int pdata_len);
3607+ const void *pdata, int pdata_len);
3608
3609 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3610 struct omap_hwmod **ohs, int oh_cnt);
3611diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3612index 9025fff..3555702 100644
3613--- a/arch/arm/mach-omap2/omap_hwmod.c
3614+++ b/arch/arm/mach-omap2/omap_hwmod.c
3615@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3616 int (*init_clkdm)(struct omap_hwmod *oh);
3617 void (*update_context_lost)(struct omap_hwmod *oh);
3618 int (*get_context_lost)(struct omap_hwmod *oh);
3619-};
3620+} __no_const;
3621
3622 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3623-static struct omap_hwmod_soc_ops soc_ops;
3624+static struct omap_hwmod_soc_ops soc_ops __read_only;
3625
3626 /* omap_hwmod_list contains all registered struct omap_hwmods */
3627 static LIST_HEAD(omap_hwmod_list);
3628diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629index 95fee54..cfa9cf1 100644
3630--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3631+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3632@@ -10,6 +10,7 @@
3633
3634 #include <linux/kernel.h>
3635 #include <linux/init.h>
3636+#include <asm/pgtable.h>
3637
3638 #include "powerdomain.h"
3639
3640@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3641
3642 void __init am43xx_powerdomains_init(void)
3643 {
3644- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3645+ pax_open_kernel();
3646+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3647+ pax_close_kernel();
3648 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3649 pwrdm_register_pwrdms(powerdomains_am43xx);
3650 pwrdm_complete_init();
3651diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3652index ff0a68c..b312aa0 100644
3653--- a/arch/arm/mach-omap2/wd_timer.c
3654+++ b/arch/arm/mach-omap2/wd_timer.c
3655@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3656 struct omap_hwmod *oh;
3657 char *oh_name = "wd_timer2";
3658 char *dev_name = "omap_wdt";
3659- struct omap_wd_timer_platform_data pdata;
3660+ static struct omap_wd_timer_platform_data pdata = {
3661+ .read_reset_sources = prm_read_reset_sources
3662+ };
3663
3664 if (!cpu_class_is_omap2() || of_have_populated_dt())
3665 return 0;
3666@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3667 return -EINVAL;
3668 }
3669
3670- pdata.read_reset_sources = prm_read_reset_sources;
3671-
3672 pdev = omap_device_build(dev_name, id, oh, &pdata,
3673 sizeof(struct omap_wd_timer_platform_data));
3674 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3675diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676index 4f25a7c..a81be85 100644
3677--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3678+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3679@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3680 bool entered_lp2 = false;
3681
3682 if (tegra_pending_sgi())
3683- ACCESS_ONCE(abort_flag) = true;
3684+ ACCESS_ONCE_RW(abort_flag) = true;
3685
3686 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3687
3688diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3689index ab95f53..4b977a7 100644
3690--- a/arch/arm/mach-tegra/irq.c
3691+++ b/arch/arm/mach-tegra/irq.c
3692@@ -20,6 +20,7 @@
3693 #include <linux/cpu_pm.h>
3694 #include <linux/interrupt.h>
3695 #include <linux/io.h>
3696+#include <linux/irq.h>
3697 #include <linux/irqchip/arm-gic.h>
3698 #include <linux/irq.h>
3699 #include <linux/kernel.h>
3700diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3701index 2cb587b..6ddfebf 100644
3702--- a/arch/arm/mach-ux500/pm.c
3703+++ b/arch/arm/mach-ux500/pm.c
3704@@ -10,6 +10,7 @@
3705 */
3706
3707 #include <linux/kernel.h>
3708+#include <linux/irq.h>
3709 #include <linux/irqchip/arm-gic.h>
3710 #include <linux/delay.h>
3711 #include <linux/io.h>
3712diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3713index 2dea8b5..6499da2 100644
3714--- a/arch/arm/mach-ux500/setup.h
3715+++ b/arch/arm/mach-ux500/setup.h
3716@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3717 .type = MT_DEVICE, \
3718 }
3719
3720-#define __MEM_DEV_DESC(x, sz) { \
3721- .virtual = IO_ADDRESS(x), \
3722- .pfn = __phys_to_pfn(x), \
3723- .length = sz, \
3724- .type = MT_MEMORY_RWX, \
3725-}
3726-
3727 extern struct smp_operations ux500_smp_ops;
3728 extern void ux500_cpu_die(unsigned int cpu);
3729
3730diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3731index 52d768f..5f93180 100644
3732--- a/arch/arm/mach-zynq/platsmp.c
3733+++ b/arch/arm/mach-zynq/platsmp.c
3734@@ -24,6 +24,7 @@
3735 #include <linux/io.h>
3736 #include <asm/cacheflush.h>
3737 #include <asm/smp_scu.h>
3738+#include <linux/irq.h>
3739 #include <linux/irqchip/arm-gic.h>
3740 #include "common.h"
3741
3742diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3743index c43c714..4f8f7b9 100644
3744--- a/arch/arm/mm/Kconfig
3745+++ b/arch/arm/mm/Kconfig
3746@@ -446,6 +446,7 @@ config CPU_32v5
3747
3748 config CPU_32v6
3749 bool
3750+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3751 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3752
3753 config CPU_32v6K
3754@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3755
3756 config CPU_USE_DOMAINS
3757 bool
3758+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3759 help
3760 This option enables or disables the use of domain switching
3761 via the set_fs() function.
3762@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3763
3764 config KUSER_HELPERS
3765 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3766- depends on MMU
3767+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3768 default y
3769 help
3770 Warning: disabling this option may break user programs.
3771@@ -812,7 +814,7 @@ config KUSER_HELPERS
3772 See Documentation/arm/kernel_user_helpers.txt for details.
3773
3774 However, the fixed address nature of these helpers can be used
3775- by ROP (return orientated programming) authors when creating
3776+ by ROP (Return Oriented Programming) authors when creating
3777 exploits.
3778
3779 If all of the binaries and libraries which run on your platform
3780diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3781index 2c0c541..4585df9 100644
3782--- a/arch/arm/mm/alignment.c
3783+++ b/arch/arm/mm/alignment.c
3784@@ -216,10 +216,12 @@ union offset_union {
3785 #define __get16_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 8 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 8); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -233,6 +235,7 @@ union offset_union {
3798 #define __get32_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v, a = addr; \
3801+ pax_open_userland(); \
3802 __get8_unaligned_check(ins,v,a,err); \
3803 val = v << ((BE) ? 24 : 0); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805@@ -241,6 +244,7 @@ union offset_union {
3806 val |= v << ((BE) ? 8 : 16); \
3807 __get8_unaligned_check(ins,v,a,err); \
3808 val |= v << ((BE) ? 0 : 24); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -254,6 +258,7 @@ union offset_union {
3814 #define __put16_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_16 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -273,6 +278,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
3829@@ -286,6 +292,7 @@ union offset_union {
3830 #define __put32_unaligned_check(ins,val,addr) \
3831 do { \
3832 unsigned int err = 0, v = val, a = addr; \
3833+ pax_open_userland(); \
3834 __asm__( FIRST_BYTE_32 \
3835 ARM( "1: "ins" %1, [%2], #1\n" ) \
3836 THUMB( "1: "ins" %1, [%2]\n" ) \
3837@@ -315,6 +322,7 @@ union offset_union {
3838 " .popsection\n" \
3839 : "=r" (err), "=&r" (v), "=&r" (a) \
3840 : "0" (err), "1" (v), "2" (a)); \
3841+ pax_close_userland(); \
3842 if (err) \
3843 goto fault; \
3844 } while (0)
3845diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3846index 5e65ca8..879e7b3 100644
3847--- a/arch/arm/mm/cache-l2x0.c
3848+++ b/arch/arm/mm/cache-l2x0.c
3849@@ -42,7 +42,7 @@ struct l2c_init_data {
3850 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3851 void (*save)(void __iomem *);
3852 struct outer_cache_fns outer_cache;
3853-};
3854+} __do_const;
3855
3856 #define CACHE_LINE_SIZE 32
3857
3858diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3859index 845769e..4278fd7 100644
3860--- a/arch/arm/mm/context.c
3861+++ b/arch/arm/mm/context.c
3862@@ -43,7 +43,7 @@
3863 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3864
3865 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3866-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3867+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3868 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3869
3870 static DEFINE_PER_CPU(atomic64_t, active_asids);
3871@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3872 {
3873 static u32 cur_idx = 1;
3874 u64 asid = atomic64_read(&mm->context.id);
3875- u64 generation = atomic64_read(&asid_generation);
3876+ u64 generation = atomic64_read_unchecked(&asid_generation);
3877
3878 if (asid != 0) {
3879 /*
3880@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3881 */
3882 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3883 if (asid == NUM_USER_ASIDS) {
3884- generation = atomic64_add_return(ASID_FIRST_VERSION,
3885+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3886 &asid_generation);
3887 flush_context(cpu);
3888 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3889@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3890 cpu_set_reserved_ttbr0();
3891
3892 asid = atomic64_read(&mm->context.id);
3893- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3894+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3895 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3896 goto switch_mm_fastpath;
3897
3898 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3899 /* Check that our ASID belongs to the current generation. */
3900 asid = atomic64_read(&mm->context.id);
3901- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3902+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3903 asid = new_context(mm, cpu);
3904 atomic64_set(&mm->context.id, asid);
3905 }
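
The _unchecked conversions here are the PAX_REFCOUNT opt-out: instrumented atomic64 ops trap on signed overflow, but the ASID generation counter wraps by design, so it switches to the uninstrumented type and accessors. A sketch of the idiom (the helper name is illustrative):

    static atomic64_unchecked_t gen = ATOMIC64_INIT(ASID_FIRST_VERSION);

    static u64 bump_generation(void)
    {
            /* may wrap past the sign bit without tripping PAX_REFCOUNT */
            return atomic64_add_return_unchecked(ASID_FIRST_VERSION, &gen);
    }
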
3906diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3907index a982dc3..2d9f5f7 100644
3908--- a/arch/arm/mm/fault.c
3909+++ b/arch/arm/mm/fault.c
3910@@ -25,6 +25,7 @@
3911 #include <asm/system_misc.h>
3912 #include <asm/system_info.h>
3913 #include <asm/tlbflush.h>
3914+#include <asm/sections.h>
3915
3916 #include "fault.h"
3917
3918@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3919 if (fixup_exception(regs))
3920 return;
3921
3922+#ifdef CONFIG_PAX_MEMORY_UDEREF
3923+ if (addr < TASK_SIZE) {
3924+ if (current->signal->curr_ip)
3925+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ else
3928+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3930+ }
3931+#endif
3932+
3933+#ifdef CONFIG_PAX_KERNEXEC
3934+ if ((fsr & FSR_WRITE) &&
3935+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3936+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3937+ {
3938+ if (current->signal->curr_ip)
3939+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ else
3942+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3944+ }
3945+#endif
3946+
3947 /*
3948 * No handler, we'll have to terminate things with extreme prejudice.
3949 */
3950@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3951 }
3952 #endif
3953
3954+#ifdef CONFIG_PAX_PAGEEXEC
3955+ if (fsr & FSR_LNX_PF) {
3956+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3957+ do_group_exit(SIGKILL);
3958+ }
3959+#endif
3960+
3961 tsk->thread.address = addr;
3962 tsk->thread.error_code = fsr;
3963 tsk->thread.trap_no = 14;
3964@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3965 }
3966 #endif /* CONFIG_MMU */
3967
3968+#ifdef CONFIG_PAX_PAGEEXEC
3969+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3970+{
3971+ long i;
3972+
3973+ printk(KERN_ERR "PAX: bytes at PC: ");
3974+ for (i = 0; i < 20; i++) {
3975+ unsigned char c;
3976+ if (get_user(c, (__force unsigned char __user *)pc+i))
3977+ printk(KERN_CONT "?? ");
3978+ else
3979+ printk(KERN_CONT "%02x ", c);
3980+ }
3981+ printk("\n");
3982+
3983+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3984+ for (i = -1; i < 20; i++) {
3985+ unsigned long c;
3986+ if (get_user(c, (__force unsigned long __user *)sp+i))
3987+ printk(KERN_CONT "???????? ");
3988+ else
3989+ printk(KERN_CONT "%08lx ", c);
3990+ }
3991+ printk("\n");
3992+}
3993+#endif
3994+
3995 /*
3996 * First Level Translation Fault Handler
3997 *
3998@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3999 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4000 struct siginfo info;
4001
4002+#ifdef CONFIG_PAX_MEMORY_UDEREF
4003+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4004+ if (current->signal->curr_ip)
4005+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ else
4008+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4009+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4010+ goto die;
4011+ }
4012+#endif
4013+
4014 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4015 return;
4016
4017+die:
4018 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4019 inf->name, fsr, addr);
4020
4021@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4022 ifsr_info[nr].name = name;
4023 }
4024
4025+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4026+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4027+
4028 asmlinkage void __exception
4029 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4030 {
4031 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4032 struct siginfo info;
4033+ unsigned long pc = instruction_pointer(regs);
4034+
4035+ if (user_mode(regs)) {
4036+ unsigned long sigpage = current->mm->context.sigpage;
4037+
4038+ if (sigpage <= pc && pc < sigpage + 7*4) {
4039+ if (pc < sigpage + 3*4)
4040+ sys_sigreturn(regs);
4041+ else
4042+ sys_rt_sigreturn(regs);
4043+ return;
4044+ }
4045+ if (pc == 0xffff0f60UL) {
4046+ /*
4047+ * PaX: __kuser_cmpxchg64 emulation
4048+ */
4049+ // TODO
4050+ //regs->ARM_pc = regs->ARM_lr;
4051+ //return;
4052+ }
4053+ if (pc == 0xffff0fa0UL) {
4054+ /*
4055+ * PaX: __kuser_memory_barrier emulation
4056+ */
4057+ // dmb(); implied by the exception
4058+ regs->ARM_pc = regs->ARM_lr;
4059+ return;
4060+ }
4061+ if (pc == 0xffff0fc0UL) {
4062+ /*
4063+ * PaX: __kuser_cmpxchg emulation
4064+ */
4065+ // TODO
4066+ //long new;
4067+ //int op;
4068+
4069+ //op = FUTEX_OP_SET << 28;
4070+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4071+ //regs->ARM_r0 = old != new;
4072+ //regs->ARM_pc = regs->ARM_lr;
4073+ //return;
4074+ }
4075+ if (pc == 0xffff0fe0UL) {
4076+ /*
4077+ * PaX: __kuser_get_tls emulation
4078+ */
4079+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4080+ regs->ARM_pc = regs->ARM_lr;
4081+ return;
4082+ }
4083+ }
4084+
4085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4086+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4087+ if (current->signal->curr_ip)
4088+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4089+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4090+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4091+ else
4092+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4093+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4094+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4095+ goto die;
4096+ }
4097+#endif
4098+
4099+#ifdef CONFIG_PAX_REFCOUNT
4100+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4101+#ifdef CONFIG_THUMB2_KERNEL
4102+ unsigned short bkpt;
4103+
4104+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4105+#else
4106+ unsigned int bkpt;
4107+
4108+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4109+#endif
4110+ current->thread.error_code = ifsr;
4111+ current->thread.trap_no = 0;
4112+ pax_report_refcount_overflow(regs);
4113+ fixup_exception(regs);
4114+ return;
4115+ }
4116+ }
4117+#endif
4118
4119 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4120 return;
4121
4122+die:
4123 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4124 inf->name, ifsr, addr);
4125
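
The magic program-counter values tested above are the fixed kuser helper entry points documented in Documentation/arm/kernel_user_helpers.txt; once the vector page is mapped non-executable, user calls into them fault, and the prefetch-abort handler emulates each helper in kernel mode:

    /* vector-page helper layout (from the in-tree documentation):
     *   0xffff0f60  __kuser_cmpxchg64       (emulation still TODO above)
     *   0xffff0fa0  __kuser_memory_barrier  (the abort itself implies dmb)
     *   0xffff0fc0  __kuser_cmpxchg         (emulation still TODO above)
     *   0xffff0fe0  __kuser_get_tls         (returns tp_value[0])
     */
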
4126diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4127index cf08bdf..772656c 100644
4128--- a/arch/arm/mm/fault.h
4129+++ b/arch/arm/mm/fault.h
4130@@ -3,6 +3,7 @@
4131
4132 /*
4133 * Fault status register encodings. We steal bit 31 for our own purposes.
4134+ * Set when the FSR value is from an instruction fault.
4135 */
4136 #define FSR_LNX_PF (1 << 31)
4137 #define FSR_WRITE (1 << 11)
4138@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4139 }
4140 #endif
4141
4142+/* valid for LPAE and !LPAE */
4143+static inline int is_xn_fault(unsigned int fsr)
4144+{
4145+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4146+}
4147+
4148+static inline int is_domain_fault(unsigned int fsr)
4149+{
4150+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4151+}
4152+
4153 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4154 unsigned long search_exception_table(unsigned long addr);
4155
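
Both predicates are mask tests over the short-descriptor fault-status encodings, where bit 1 distinguishes section from page faults: domain faults encode as 0b01001 (section) and 0b01011 (page), so masking with 0xD collapses the pair onto 0x9. A standalone check of the arithmetic (ordinary userspace C, not kernel code):

    #include <assert.h>

    int main(void)
    {
            assert((0x9 & 0xD) == 0x9);   /* domain fault, section */
            assert((0xB & 0xD) == 0x9);   /* domain fault, page */
            assert((0x5 & 0xD) != 0x9);   /* translation fault: rejected */
            assert((0xD & 0xD) != 0x9);   /* permission fault: rejected */
            return 0;
    }
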
4156diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4157index 2495c8c..415b7fc 100644
4158--- a/arch/arm/mm/init.c
4159+++ b/arch/arm/mm/init.c
4160@@ -758,7 +758,46 @@ void free_tcmmem(void)
4161 {
4162 #ifdef CONFIG_HAVE_TCM
4163 extern char __tcm_start, __tcm_end;
4164+#endif
4165
4166+#ifdef CONFIG_PAX_KERNEXEC
4167+ unsigned long addr;
4168+ pgd_t *pgd;
4169+ pud_t *pud;
4170+ pmd_t *pmd;
4171+ int cpu_arch = cpu_architecture();
4172+ unsigned int cr = get_cr();
4173+
4174+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4175+ /* make page tables, etc. before .text NX */
4176+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4177+ pgd = pgd_offset_k(addr);
4178+ pud = pud_offset(pgd, addr);
4179+ pmd = pmd_offset(pud, addr);
4180+ __section_update(pmd, addr, PMD_SECT_XN);
4181+ }
4182+ /* make init NX */
4183+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4184+ pgd = pgd_offset_k(addr);
4185+ pud = pud_offset(pgd, addr);
4186+ pmd = pmd_offset(pud, addr);
4187+ __section_update(pmd, addr, PMD_SECT_XN);
4188+ }
4189+ /* make kernel code/rodata RX */
4190+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4191+ pgd = pgd_offset_k(addr);
4192+ pud = pud_offset(pgd, addr);
4193+ pmd = pmd_offset(pud, addr);
4194+#ifdef CONFIG_ARM_LPAE
4195+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4196+#else
4197+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4198+#endif
4199+ }
4200+ }
4201+#endif
4202+
4203+#ifdef CONFIG_HAVE_TCM
4204 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4205 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4206 #endif
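
free_tcmmem() is repurposed here as the point where KERNEXEC hardens the final kernel layout: everything below .text and the init region become non-executable, and code plus rodata become read-only. __section_update() comes from an earlier hunk of this patch; its assumed shape is roughly:

    static void __section_update(pmd_t *pmd, unsigned long addr, pmdval_t prot)
    {
    #ifdef CONFIG_ARM_LPAE
            pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
    #else
            if (addr & SECTION_SIZE)        /* second half of a 2-section pmd */
                    pmd[1] = __pmd(pmd_val(pmd[1]) | prot);
            else
                    pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
    #endif
            flush_pmd_entry(pmd);           /* write back the updated entry */
    }
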
4207diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4208index d1e5ad7..84dcbf2 100644
4209--- a/arch/arm/mm/ioremap.c
4210+++ b/arch/arm/mm/ioremap.c
4211@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4212 unsigned int mtype;
4213
4214 if (cached)
4215- mtype = MT_MEMORY_RWX;
4216+ mtype = MT_MEMORY_RX;
4217 else
4218- mtype = MT_MEMORY_RWX_NONCACHED;
4219+ mtype = MT_MEMORY_RX_NONCACHED;
4220
4221 return __arm_ioremap_caller(phys_addr, size, mtype,
4222 __builtin_return_address(0));
4223diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4224index 5e85ed3..b10a7ed 100644
4225--- a/arch/arm/mm/mmap.c
4226+++ b/arch/arm/mm/mmap.c
4227@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4228 struct vm_area_struct *vma;
4229 int do_align = 0;
4230 int aliasing = cache_is_vipt_aliasing();
4231+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4232 struct vm_unmapped_area_info info;
4233
4234 /*
4235@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 if (len > TASK_SIZE)
4237 return -ENOMEM;
4238
4239+#ifdef CONFIG_PAX_RANDMMAP
4240+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4241+#endif
4242+
4243 if (addr) {
4244 if (do_align)
4245 addr = COLOUR_ALIGN(addr, pgoff);
4246@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4247 addr = PAGE_ALIGN(addr);
4248
4249 vma = find_vma(mm, addr);
4250- if (TASK_SIZE - len >= addr &&
4251- (!vma || addr + len <= vma->vm_start))
4252+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4253 return addr;
4254 }
4255
4256@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4257 info.high_limit = TASK_SIZE;
4258 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4259 info.align_offset = pgoff << PAGE_SHIFT;
4260+ info.threadstack_offset = offset;
4261 return vm_unmapped_area(&info);
4262 }
4263
4264@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4265 unsigned long addr = addr0;
4266 int do_align = 0;
4267 int aliasing = cache_is_vipt_aliasing();
4268+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4269 struct vm_unmapped_area_info info;
4270
4271 /*
4272@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 return addr;
4274 }
4275
4276+#ifdef CONFIG_PAX_RANDMMAP
4277+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4278+#endif
4279+
4280 /* requesting a specific address */
4281 if (addr) {
4282 if (do_align)
4283@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4284 else
4285 addr = PAGE_ALIGN(addr);
4286 vma = find_vma(mm, addr);
4287- if (TASK_SIZE - len >= addr &&
4288- (!vma || addr + len <= vma->vm_start))
4289+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4290 return addr;
4291 }
4292
4293@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4294 info.high_limit = mm->mmap_base;
4295 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4296 info.align_offset = pgoff << PAGE_SHIFT;
4297+ info.threadstack_offset = offset;
4298 addr = vm_unmapped_area(&info);
4299
4300 /*
4301@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4302 {
4303 unsigned long random_factor = 0UL;
4304
4305+#ifdef CONFIG_PAX_RANDMMAP
4306+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4307+#endif
4308+
4309 /* 8 bits of randomness in 20 address space bits */
4310 if ((current->flags & PF_RANDOMIZE) &&
4311 !(current->personality & ADDR_NO_RANDOMIZE))
4312@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4313
4314 if (mmap_is_legacy()) {
4315 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4316+
4317+#ifdef CONFIG_PAX_RANDMMAP
4318+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4319+ mm->mmap_base += mm->delta_mmap;
4320+#endif
4321+
4322 mm->get_unmapped_area = arch_get_unmapped_area;
4323 } else {
4324 mm->mmap_base = mmap_base(random_factor);
4325+
4326+#ifdef CONFIG_PAX_RANDMMAP
4327+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4328+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4329+#endif
4330+
4331 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4332 }
4333 }
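
The layering is worth spelling out: stock ARM already derives 8 bits of entropy at page granularity (a 256-page, 1 MiB window), and PAX_RANDMMAP adds its own deltas on top while skipping the caller-supplied address hint. The resulting base arithmetic, as a sketch:

    /* legacy layout:   base = TASK_UNMAPPED_BASE + random_factor
     *                         (+ mm->delta_mmap under MF_PAX_RANDMMAP)
     * topdown layout:  base = mmap_base(random_factor)
     *                         (- mm->delta_mmap - mm->delta_stack)
     * random_factor spans 8 bits of pages: 256 * 4096 = 0x100000 (1 MiB)
     */
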
4334diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4335index 4e6ef89..21c27f2 100644
4336--- a/arch/arm/mm/mmu.c
4337+++ b/arch/arm/mm/mmu.c
4338@@ -41,6 +41,22 @@
4339 #include "mm.h"
4340 #include "tcm.h"
4341
4342+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4343+void modify_domain(unsigned int dom, unsigned int type)
4344+{
4345+ struct thread_info *thread = current_thread_info();
4346+ unsigned int domain = thread->cpu_domain;
4347+ /*
4348+ * DOMAIN_MANAGER might be defined to some other value;
4349+ * use the arch-defined constant.
4350+ */
4351+ domain &= ~domain_val(dom, 3);
4352+ thread->cpu_domain = domain | domain_val(dom, type);
4353+ set_domain(thread->cpu_domain);
4354+}
4355+EXPORT_SYMBOL(modify_domain);
4356+#endif
4357+
4358 /*
4359 * empty_zero_page is a special page that is used for
4360 * zero-initialized data and COW.
4361@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4362 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4363 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4364
4365-static struct mem_type mem_types[] = {
4366+#ifdef CONFIG_PAX_KERNEXEC
4367+#define L_PTE_KERNEXEC L_PTE_RDONLY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4369+#else
4370+#define L_PTE_KERNEXEC L_PTE_DIRTY
4371+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4372+#endif
4373+
4374+static struct mem_type mem_types[] __read_only = {
4375 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4376 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4377 L_PTE_SHARED,
4378@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4379 .prot_sect = PROT_SECT_DEVICE,
4380 .domain = DOMAIN_IO,
4381 },
4382- [MT_UNCACHED] = {
4383+ [MT_UNCACHED_RW] = {
4384 .prot_pte = PROT_PTE_DEVICE,
4385 .prot_l1 = PMD_TYPE_TABLE,
4386 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4387 .domain = DOMAIN_IO,
4388 },
4389- [MT_CACHECLEAN] = {
4390- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4391+ [MT_CACHECLEAN_RO] = {
4392+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4393 .domain = DOMAIN_KERNEL,
4394 },
4395 #ifndef CONFIG_ARM_LPAE
4396- [MT_MINICLEAN] = {
4397- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4398+ [MT_MINICLEAN_RO] = {
4399+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4400 .domain = DOMAIN_KERNEL,
4401 },
4402 #endif
4403@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4404 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4405 L_PTE_RDONLY,
4406 .prot_l1 = PMD_TYPE_TABLE,
4407- .domain = DOMAIN_USER,
4408+ .domain = DOMAIN_VECTORS,
4409 },
4410 [MT_HIGH_VECTORS] = {
4411 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4412 L_PTE_USER | L_PTE_RDONLY,
4413 .prot_l1 = PMD_TYPE_TABLE,
4414- .domain = DOMAIN_USER,
4415+ .domain = DOMAIN_VECTORS,
4416 },
4417- [MT_MEMORY_RWX] = {
4418+ [__MT_MEMORY_RWX] = {
4419 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4420 .prot_l1 = PMD_TYPE_TABLE,
4421 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4422@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4423 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4424 .domain = DOMAIN_KERNEL,
4425 },
4426- [MT_ROM] = {
4427- .prot_sect = PMD_TYPE_SECT,
4428+ [MT_MEMORY_RX] = {
4429+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4430+ .prot_l1 = PMD_TYPE_TABLE,
4431+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4432+ .domain = DOMAIN_KERNEL,
4433+ },
4434+ [MT_ROM_RX] = {
4435+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4436 .domain = DOMAIN_KERNEL,
4437 },
4438- [MT_MEMORY_RWX_NONCACHED] = {
4439+ [MT_MEMORY_RW_NONCACHED] = {
4440 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4441 L_PTE_MT_BUFFERABLE,
4442 .prot_l1 = PMD_TYPE_TABLE,
4443 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4444 .domain = DOMAIN_KERNEL,
4445 },
4446+ [MT_MEMORY_RX_NONCACHED] = {
4447+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4448+ L_PTE_MT_BUFFERABLE,
4449+ .prot_l1 = PMD_TYPE_TABLE,
4450+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4451+ .domain = DOMAIN_KERNEL,
4452+ },
4453 [MT_MEMORY_RW_DTCM] = {
4454 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4455 L_PTE_XN,
4456@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4457 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4458 .domain = DOMAIN_KERNEL,
4459 },
4460- [MT_MEMORY_RWX_ITCM] = {
4461- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4462+ [MT_MEMORY_RX_ITCM] = {
4463+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4464 .prot_l1 = PMD_TYPE_TABLE,
4465+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4466 .domain = DOMAIN_KERNEL,
4467 },
4468 [MT_MEMORY_RW_SO] = {
4469@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4470 * Mark cache clean areas and XIP ROM read only
4471 * from SVC mode and no access from userspace.
4472 */
4473- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4475- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+#ifdef CONFIG_PAX_KERNEXEC
4478+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4479+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481+#endif
4482+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4483+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4484 #endif
4485
4486 /*
4487@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4488 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4489 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4490 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4491- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4492- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4493+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4494+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4495 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4496 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4497+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4498+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4499 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4501- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4502+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4503+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4504+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4506 }
4507 }
4508
4509@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4510 if (cpu_arch >= CPU_ARCH_ARMv6) {
4511 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4512 /* Non-cacheable Normal is XCB = 001 */
4513- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4514+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4515+ PMD_SECT_BUFFERED;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4517 PMD_SECT_BUFFERED;
4518 } else {
4519 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4520- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4521+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4522+ PMD_SECT_TEX(1);
4523+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4524 PMD_SECT_TEX(1);
4525 }
4526 } else {
4527- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4528+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4529+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4530 }
4531
4532 #ifdef CONFIG_ARM_LPAE
4533@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4534 user_pgprot |= PTE_EXT_PXN;
4535 #endif
4536
4537+ user_pgprot |= __supported_pte_mask;
4538+
4539 for (i = 0; i < 16; i++) {
4540 pteval_t v = pgprot_val(protection_map[i]);
4541 protection_map[i] = __pgprot(v | user_pgprot);
4542@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4543
4544 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4545 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4546- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4547- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4548+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4549+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4550 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4551 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4552+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4553+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4554 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4555- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4556- mem_types[MT_ROM].prot_sect |= cp->pmd;
4557+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4558+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4559+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4560
4561 switch (cp->pmd) {
4562 case PMD_SECT_WT:
4563- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4564+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4565 break;
4566 case PMD_SECT_WB:
4567 case PMD_SECT_WBWA:
4568- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4569+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4570 break;
4571 }
4572 pr_info("Memory policy: %sData cache %s\n",
4573@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4574 return;
4575 }
4576
4577- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4578+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4579 md->virtual >= PAGE_OFFSET &&
4580 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4581 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4582@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4583 * called function. This means you can't use any function or debugging
4584 * method which may touch any device, otherwise the kernel _will_ crash.
4585 */
4586+
4587+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4588+
4589 static void __init devicemaps_init(const struct machine_desc *mdesc)
4590 {
4591 struct map_desc map;
4592 unsigned long addr;
4593- void *vectors;
4594
4595- /*
4596- * Allocate the vector page early.
4597- */
4598- vectors = early_alloc(PAGE_SIZE * 2);
4599-
4600- early_trap_init(vectors);
4601+ early_trap_init(&vectors);
4602
4603 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4604 pmd_clear(pmd_off_k(addr));
4605@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4606 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4607 map.virtual = MODULES_VADDR;
4608 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4609- map.type = MT_ROM;
4610+ map.type = MT_ROM_RX;
4611 create_mapping(&map);
4612 #endif
4613
4614@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4615 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4616 map.virtual = FLUSH_BASE;
4617 map.length = SZ_1M;
4618- map.type = MT_CACHECLEAN;
4619+ map.type = MT_CACHECLEAN_RO;
4620 create_mapping(&map);
4621 #endif
4622 #ifdef FLUSH_BASE_MINICACHE
4623 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4624 map.virtual = FLUSH_BASE_MINICACHE;
4625 map.length = SZ_1M;
4626- map.type = MT_MINICLEAN;
4627+ map.type = MT_MINICLEAN_RO;
4628 create_mapping(&map);
4629 #endif
4630
4631@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4632 * location (0xffff0000). If we aren't using high-vectors, also
4633 * create a mapping at the low-vectors virtual address.
4634 */
4635- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4636+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4637 map.virtual = 0xffff0000;
4638 map.length = PAGE_SIZE;
4639 #ifdef CONFIG_KUSER_HELPERS
4640@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4641 static void __init map_lowmem(void)
4642 {
4643 struct memblock_region *reg;
4644+#ifndef CONFIG_PAX_KERNEXEC
4645 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4646 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4647+#endif
4648
4649 /* Map all the lowmem memory banks. */
4650 for_each_memblock(memory, reg) {
4651@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4652 if (start >= end)
4653 break;
4654
4655+#ifdef CONFIG_PAX_KERNEXEC
4656+ map.pfn = __phys_to_pfn(start);
4657+ map.virtual = __phys_to_virt(start);
4658+ map.length = end - start;
4659+
4660+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4661+ struct map_desc kernel;
4662+ struct map_desc initmap;
4663+
4664+ /* when freeing initmem we will make this RW */
4665+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4666+ initmap.virtual = (unsigned long)__init_begin;
4667+ initmap.length = _sdata - __init_begin;
4668+ initmap.type = __MT_MEMORY_RWX;
4669+ create_mapping(&initmap);
4670+
4671+ /* when freeing initmem we will make this RX */
4672+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4673+ kernel.virtual = (unsigned long)_stext;
4674+ kernel.length = __init_begin - _stext;
4675+ kernel.type = __MT_MEMORY_RWX;
4676+ create_mapping(&kernel);
4677+
4678+ if (map.virtual < (unsigned long)_stext) {
4679+ map.length = (unsigned long)_stext - map.virtual;
4680+ map.type = __MT_MEMORY_RWX;
4681+ create_mapping(&map);
4682+ }
4683+
4684+ map.pfn = __phys_to_pfn(__pa(_sdata));
4685+ map.virtual = (unsigned long)_sdata;
4686+ map.length = end - __pa(_sdata);
4687+ }
4688+
4689+ map.type = MT_MEMORY_RW;
4690+ create_mapping(&map);
4691+#else
4692 if (end < kernel_x_start) {
4693 map.pfn = __phys_to_pfn(start);
4694 map.virtual = __phys_to_virt(start);
4695 map.length = end - start;
4696- map.type = MT_MEMORY_RWX;
4697+ map.type = __MT_MEMORY_RWX;
4698
4699 create_mapping(&map);
4700 } else if (start >= kernel_x_end) {
4701@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4702 map.pfn = __phys_to_pfn(kernel_x_start);
4703 map.virtual = __phys_to_virt(kernel_x_start);
4704 map.length = kernel_x_end - kernel_x_start;
4705- map.type = MT_MEMORY_RWX;
4706+ map.type = __MT_MEMORY_RWX;
4707
4708 create_mapping(&map);
4709
4710@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4711 create_mapping(&map);
4712 }
4713 }
4714+#endif
4715 }
4716 }
4717
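
Taken together, the mmu.c hunks split every formerly writable-and-executable memory type so that no mapping is W+X after boot; the renames encode the policy in the identifier (summary reconstructed from the hunks above):

    /* MT_MEMORY_RWX           -> __MT_MEMORY_RWX (boot-time only, remapped
     *                            RX/RW when initmem is freed)
     * MT_MEMORY_RWX_NONCACHED -> MT_MEMORY_RW_NONCACHED + MT_MEMORY_RX_NONCACHED
     * MT_MEMORY_RWX_ITCM      -> MT_MEMORY_RX_ITCM
     * MT_ROM                  -> MT_ROM_RX   (now PMD_SECT_RDONLY as well)
     * MT_UNCACHED             -> MT_UNCACHED_RW
     * MT_CACHECLEAN/MINICLEAN -> MT_CACHECLEAN_RO / MT_MINICLEAN_RO
     * low/high vectors        -> DOMAIN_VECTORS instead of DOMAIN_USER
     */
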
4718diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4719index e1268f9..a9755a7 100644
4720--- a/arch/arm/net/bpf_jit_32.c
4721+++ b/arch/arm/net/bpf_jit_32.c
4722@@ -20,6 +20,7 @@
4723 #include <asm/cacheflush.h>
4724 #include <asm/hwcap.h>
4725 #include <asm/opcodes.h>
4726+#include <asm/pgtable.h>
4727
4728 #include "bpf_jit_32.h"
4729
4730@@ -71,7 +72,11 @@ struct jit_ctx {
4731 #endif
4732 };
4733
4734+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4735+int bpf_jit_enable __read_only;
4736+#else
4737 int bpf_jit_enable __read_mostly;
4738+#endif
4739
4740 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4741 {
4742@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4743 {
4744 u32 *ptr;
4745 /* We are guaranteed to have aligned memory. */
4746+ pax_open_kernel();
4747 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4748 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4749+ pax_close_kernel();
4750 }
4751
4752 static void build_prologue(struct jit_ctx *ctx)
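
Two related hardenings for the BPF JIT: the enable knob becomes write-once under GRKERNSEC_BPF_HARDEN, and jit_fill_hole() must bracket its stores because JIT pages live in module space, which KERNEXEC maps read-execute. The bracket in isolation, reusing identifiers from the hunk (function name is illustrative):

    static void fill_hole_sketch(u32 *area, unsigned int words)
    {
            pax_open_kernel();              /* JIT memory is RX otherwise */
            while (words--)
                    *area++ = __opcode_to_mem_arm(ARM_INST_UDF);
            pax_close_kernel();
    }
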
4753diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4754index 5b217f4..c23f40e 100644
4755--- a/arch/arm/plat-iop/setup.c
4756+++ b/arch/arm/plat-iop/setup.c
4757@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4758 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4759 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4760 .length = IOP3XX_PERIPHERAL_SIZE,
4761- .type = MT_UNCACHED,
4762+ .type = MT_UNCACHED_RW,
4763 },
4764 };
4765
4766diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4767index a5bc92d..0bb4730 100644
4768--- a/arch/arm/plat-omap/sram.c
4769+++ b/arch/arm/plat-omap/sram.c
4770@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4771 * Looks like we need to preserve some bootloader code at the
4772 * beginning of SRAM for jumping to flash for reboot to work...
4773 */
4774+ pax_open_kernel();
4775 memset_io(omap_sram_base + omap_sram_skip, 0,
4776 omap_sram_size - omap_sram_skip);
4777+ pax_close_kernel();
4778 }
4779diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780index ce6d763..cfea917 100644
4781--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4782+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4783@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4784 int (*started)(unsigned ch);
4785 int (*flush)(unsigned ch);
4786 int (*stop)(unsigned ch);
4787-};
4788+} __no_const;
4789
4790 extern void *samsung_dmadev_get_ops(void);
4791 extern void *s3c_dma_get_ops(void);
4792diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4793index 7047051..44e8675 100644
4794--- a/arch/arm64/include/asm/atomic.h
4795+++ b/arch/arm64/include/asm/atomic.h
4796@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4797 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4798 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4799
4800+#define atomic64_read_unchecked(v) atomic64_read(v)
4801+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4802+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4803+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4804+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4805+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4806+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4807+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4808+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4809+
4810 #endif
4811 #endif
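
arm64 carries no PAX_REFCOUNT instrumentation in this patch, so the _unchecked API is satisfied by aliasing: the unchecked type is the checked type and every accessor forwards to the plain op. The companion typedef is assumed to sit alongside these defines:

    typedef atomic64_t atomic64_unchecked_t;   /* assumed companion typedef */

    /* with that alias in place, generic code can write e.g. */
    static atomic64_unchecked_t hits = ATOMIC64_INIT(0);
    /* ... atomic64_inc_unchecked(&hits); ... */
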
4812diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4813index a5abb00..9cbca9a 100644
4814--- a/arch/arm64/include/asm/barrier.h
4815+++ b/arch/arm64/include/asm/barrier.h
4816@@ -44,7 +44,7 @@
4817 do { \
4818 compiletime_assert_atomic_type(*p); \
4819 barrier(); \
4820- ACCESS_ONCE(*p) = (v); \
4821+ ACCESS_ONCE_RW(*p) = (v); \
4822 } while (0)
4823
4824 #define smp_load_acquire(p) \
4825diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4826index 09da25b..3ea0d64 100644
4827--- a/arch/arm64/include/asm/percpu.h
4828+++ b/arch/arm64/include/asm/percpu.h
4829@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4830 {
4831 switch (size) {
4832 case 1:
4833- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4834+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4835 break;
4836 case 2:
4837- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4838+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4839 break;
4840 case 4:
4841- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4842+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4843 break;
4844 case 8:
4845- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4846+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4847 break;
4848 default:
4849 BUILD_BUG();
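
ACCESS_ONCE_RW exists because the constify changes elsewhere in this patch make plain ACCESS_ONCE() read-only; intentional volatile stores, like the smp_store_release() and __percpu_write() hunks above, must name the writable variant explicitly. The assumed compiler.h pair:

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
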
4850diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4851index e20df38..027ede3 100644
4852--- a/arch/arm64/include/asm/pgalloc.h
4853+++ b/arch/arm64/include/asm/pgalloc.h
4854@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4855 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4856 }
4857
4858+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4859+{
4860+ pud_populate(mm, pud, pmd);
4861+}
4862+
4863 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4864
4865 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4866diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4867index 3bf8f4e..5dd5491 100644
4868--- a/arch/arm64/include/asm/uaccess.h
4869+++ b/arch/arm64/include/asm/uaccess.h
4870@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4871 flag; \
4872 })
4873
4874+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4875 #define access_ok(type, addr, size) __range_ok(addr, size)
4876 #define user_addr_max get_fs
4877
4878diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4879index df34a70..5727a75 100644
4880--- a/arch/arm64/mm/dma-mapping.c
4881+++ b/arch/arm64/mm/dma-mapping.c
4882@@ -137,7 +137,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4883 phys_to_page(paddr),
4884 size >> PAGE_SHIFT);
4885 if (!freed)
4886- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4887+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4888 }
4889
4890 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
4891diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4892index c3a58a1..78fbf54 100644
4893--- a/arch/avr32/include/asm/cache.h
4894+++ b/arch/avr32/include/asm/cache.h
4895@@ -1,8 +1,10 @@
4896 #ifndef __ASM_AVR32_CACHE_H
4897 #define __ASM_AVR32_CACHE_H
4898
4899+#include <linux/const.h>
4900+
4901 #define L1_CACHE_SHIFT 5
4902-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4903+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4904
4905 /*
4906 * Memory returned by kmalloc() may be used for DMA, so we must make
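
The recurring cache.h change (here and in the blackfin/cris hunks nearby) swaps a bare constant for _AC(1,UL) << L1_CACHE_SHIFT so the value is properly typed in C yet still digestible when the header is pulled into assembly. _AC() comes from linux/const.h, roughly:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X               /* assembler: no type suffix */
    #else
    #define __AC(X, Y)      (X##Y)
    #define _AC(X, Y)       __AC(X, Y)      /* C: expands to 1UL, 32UL, ... */
    #endif
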
4907diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4908index d232888..87c8df1 100644
4909--- a/arch/avr32/include/asm/elf.h
4910+++ b/arch/avr32/include/asm/elf.h
4911@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4912 the loader. We need to make sure that it is out of the way of the program
4913 that it will "exec", and that there is sufficient room for the brk. */
4914
4915-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4916+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4917
4918+#ifdef CONFIG_PAX_ASLR
4919+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4920+
4921+#define PAX_DELTA_MMAP_LEN 15
4922+#define PAX_DELTA_STACK_LEN 15
4923+#endif
4924
4925 /* This yields a mask that user programs can use to figure out what
4926 instruction set this CPU supports. This could be done in user space,
4927diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4928index 479330b..53717a8 100644
4929--- a/arch/avr32/include/asm/kmap_types.h
4930+++ b/arch/avr32/include/asm/kmap_types.h
4931@@ -2,9 +2,9 @@
4932 #define __ASM_AVR32_KMAP_TYPES_H
4933
4934 #ifdef CONFIG_DEBUG_HIGHMEM
4935-# define KM_TYPE_NR 29
4936+# define KM_TYPE_NR 30
4937 #else
4938-# define KM_TYPE_NR 14
4939+# define KM_TYPE_NR 15
4940 #endif
4941
4942 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4943diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4944index d223a8b..69c5210 100644
4945--- a/arch/avr32/mm/fault.c
4946+++ b/arch/avr32/mm/fault.c
4947@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4948
4949 int exception_trace = 1;
4950
4951+#ifdef CONFIG_PAX_PAGEEXEC
4952+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4953+{
4954+ unsigned long i;
4955+
4956+ printk(KERN_ERR "PAX: bytes at PC: ");
4957+ for (i = 0; i < 20; i++) {
4958+ unsigned char c;
4959+ if (get_user(c, (unsigned char *)pc+i))
4960+ printk(KERN_CONT "???????? ");
4961+ else
4962+ printk(KERN_CONT "%02x ", c);
4963+ }
4964+ printk("\n");
4965+}
4966+#endif
4967+
4968 /*
4969 * This routine handles page faults. It determines the address and the
4970 * problem, and then passes it off to one of the appropriate routines.
4971@@ -178,6 +195,16 @@ bad_area:
4972 up_read(&mm->mmap_sem);
4973
4974 if (user_mode(regs)) {
4975+
4976+#ifdef CONFIG_PAX_PAGEEXEC
4977+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4978+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4979+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4980+ do_group_exit(SIGKILL);
4981+ }
4982+ }
4983+#endif
4984+
4985 if (exception_trace && printk_ratelimit())
4986 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4987 "sp %08lx ecr %lu\n",
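
pax_report_insns() is the per-architecture half of PAGEEXEC violation reporting: it hexdumps the instruction stream at the faulting PC so a log entry can distinguish a real code-injection attempt from, say, a runaway function pointer. Each architecture picks its own dump width (avr32: 20 bytes here; ia64 further down: 8 words). The surrounding flow, sketched (pax_report_fault() lives in the arch-independent fs/exec.c part of this patch):

	/* on an execution fault inside a non-executable mapping: */
	pax_report_fault(regs, pc, sp);	/* logs comm/pid/uids, then calls  */
					/* pax_report_insns() for the dump */
	do_group_exit(SIGKILL);		/* the whole thread group is killed */
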
4988diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4989index 568885a..f8008df 100644
4990--- a/arch/blackfin/include/asm/cache.h
4991+++ b/arch/blackfin/include/asm/cache.h
4992@@ -7,6 +7,7 @@
4993 #ifndef __ARCH_BLACKFIN_CACHE_H
4994 #define __ARCH_BLACKFIN_CACHE_H
4995
4996+#include <linux/const.h>
4997 #include <linux/linkage.h> /* for asmlinkage */
4998
4999 /*
5000@@ -14,7 +15,7 @@
5001 * Blackfin loads 32 bytes for cache
5002 */
5003 #define L1_CACHE_SHIFT 5
5004-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5005+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5006 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5007
5008 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5009diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5010index aea2718..3639a60 100644
5011--- a/arch/cris/include/arch-v10/arch/cache.h
5012+++ b/arch/cris/include/arch-v10/arch/cache.h
5013@@ -1,8 +1,9 @@
5014 #ifndef _ASM_ARCH_CACHE_H
5015 #define _ASM_ARCH_CACHE_H
5016
5017+#include <linux/const.h>
5018 /* Etrax 100LX have 32-byte cache-lines. */
5019-#define L1_CACHE_BYTES 32
5020 #define L1_CACHE_SHIFT 5
5021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5022
5023 #endif /* _ASM_ARCH_CACHE_H */
5024diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5025index 7caf25d..ee65ac5 100644
5026--- a/arch/cris/include/arch-v32/arch/cache.h
5027+++ b/arch/cris/include/arch-v32/arch/cache.h
5028@@ -1,11 +1,12 @@
5029 #ifndef _ASM_CRIS_ARCH_CACHE_H
5030 #define _ASM_CRIS_ARCH_CACHE_H
5031
5032+#include <linux/const.h>
5033 #include <arch/hwregs/dma.h>
5034
5035 /* A cache-line is 32 bytes. */
5036-#define L1_CACHE_BYTES 32
5037 #define L1_CACHE_SHIFT 5
5038+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5039
5040 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5041
5042diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5043index 102190a..5334cea 100644
5044--- a/arch/frv/include/asm/atomic.h
5045+++ b/arch/frv/include/asm/atomic.h
5046@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5047 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5048 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5049
5050+#define atomic64_read_unchecked(v) atomic64_read(v)
5051+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5052+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5053+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5054+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5055+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5056+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5057+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5058+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5059+
5060 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5061 {
5062 int c, old;
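
The *_unchecked defines here are the fallback for architectures that do not implement PAX_REFCOUNT: the unchecked API simply aliases the plain operations, so common code can target one split API everywhere. The split lets counters that may legitimately wrap opt out of overflow checking explicitly; hypothetical usage:

	static atomic_unchecked_t stat_hits = ATOMIC_INIT(0);	/* may wrap: fine  */
	static atomic_t ref = ATOMIC_INIT(1);			/* must never wrap */

	static void hit(void)
	{
		atomic_inc_unchecked(&stat_hits);	/* exempt from checking */
		atomic_inc(&ref);	/* trapped on overflow where the arch  */
	}				/* implements PAX_REFCOUNT             */
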
5063diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5064index 2797163..c2a401df9 100644
5065--- a/arch/frv/include/asm/cache.h
5066+++ b/arch/frv/include/asm/cache.h
5067@@ -12,10 +12,11 @@
5068 #ifndef __ASM_CACHE_H
5069 #define __ASM_CACHE_H
5070
5071+#include <linux/const.h>
5072
5073 /* bytes per L1 cache line */
5074 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5075-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5076+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5077
5078 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5079 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5080diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5081index 43901f2..0d8b865 100644
5082--- a/arch/frv/include/asm/kmap_types.h
5083+++ b/arch/frv/include/asm/kmap_types.h
5084@@ -2,6 +2,6 @@
5085 #ifndef _ASM_KMAP_TYPES_H
5086 #define _ASM_KMAP_TYPES_H
5087
5088-#define KM_TYPE_NR 17
5089+#define KM_TYPE_NR 18
5090
5091 #endif
5092diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5093index 836f147..4cf23f5 100644
5094--- a/arch/frv/mm/elf-fdpic.c
5095+++ b/arch/frv/mm/elf-fdpic.c
5096@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5097 {
5098 struct vm_area_struct *vma;
5099 struct vm_unmapped_area_info info;
5100+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5101
5102 if (len > TASK_SIZE)
5103 return -ENOMEM;
5104@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5105 if (addr) {
5106 addr = PAGE_ALIGN(addr);
5107 vma = find_vma(current->mm, addr);
5108- if (TASK_SIZE - len >= addr &&
5109- (!vma || addr + len <= vma->vm_start))
5110+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5111 goto success;
5112 }
5113
5114@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5115 info.high_limit = (current->mm->start_stack - 0x00200000);
5116 info.align_mask = 0;
5117 info.align_offset = 0;
5118+ info.threadstack_offset = offset;
5119 addr = vm_unmapped_area(&info);
5120 if (!(addr & ~PAGE_MASK))
5121 goto success;
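
check_heap_stack_gap() replaces the bare "addr + len <= vma->vm_start" test in every arch_get_unmapped_area() this patch touches; combined with the threadstack_offset it keeps a randomised cushion between new mappings and stack-like VMAs, so a linear heap overflow cannot run straight into a thread stack. A simplified sketch of the invariant (the real helper also accounts for VM_GROWSDOWN guard gaps):

	static bool gap_ok(const struct vm_area_struct *vma,
			   unsigned long addr, unsigned long len,
			   unsigned long offset)
	{
		if (!vma)
			return true;	/* nothing mapped above the range */
		return addr + len + offset <= vma->vm_start;
	}
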
5122diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5123index 69952c1..4fa2908 100644
5124--- a/arch/hexagon/include/asm/cache.h
5125+++ b/arch/hexagon/include/asm/cache.h
5126@@ -21,9 +21,11 @@
5127 #ifndef __ASM_CACHE_H
5128 #define __ASM_CACHE_H
5129
5130+#include <linux/const.h>
5131+
5132 /* Bytes per L1 cache line */
5133-#define L1_CACHE_SHIFT (5)
5134-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5135+#define L1_CACHE_SHIFT 5
5136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5137
5138 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5139
5140diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5141index 074e52b..76afdac 100644
5142--- a/arch/ia64/Kconfig
5143+++ b/arch/ia64/Kconfig
5144@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5145 config KEXEC
5146 bool "kexec system call"
5147 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5148+ depends on !GRKERNSEC_KMEM
5149 help
5150 kexec is a system call that implements the ability to shutdown your
5151 current kernel, and to start another kernel. It is like a reboot
5152diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5153index 970d0bd..e750b9b 100644
5154--- a/arch/ia64/Makefile
5155+++ b/arch/ia64/Makefile
5156@@ -98,5 +98,6 @@ endef
5157 archprepare: make_nr_irqs_h FORCE
5158 PHONY += make_nr_irqs_h FORCE
5159
5160+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5161 make_nr_irqs_h: FORCE
5162 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5163diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5164index 0bf0350..2ad1957 100644
5165--- a/arch/ia64/include/asm/atomic.h
5166+++ b/arch/ia64/include/asm/atomic.h
5167@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5168 #define atomic64_inc(v) atomic64_add(1, (v))
5169 #define atomic64_dec(v) atomic64_sub(1, (v))
5170
5171+#define atomic64_read_unchecked(v) atomic64_read(v)
5172+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5173+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5174+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5175+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5176+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5177+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5178+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5179+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5180+
5181 #endif /* _ASM_IA64_ATOMIC_H */
5182diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5183index f6769eb..1cdb590 100644
5184--- a/arch/ia64/include/asm/barrier.h
5185+++ b/arch/ia64/include/asm/barrier.h
5186@@ -66,7 +66,7 @@
5187 do { \
5188 compiletime_assert_atomic_type(*p); \
5189 barrier(); \
5190- ACCESS_ONCE(*p) = (v); \
5191+ ACCESS_ONCE_RW(*p) = (v); \
5192 } while (0)
5193
5194 #define smp_load_acquire(p) \
5195diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5196index 988254a..e1ee885 100644
5197--- a/arch/ia64/include/asm/cache.h
5198+++ b/arch/ia64/include/asm/cache.h
5199@@ -1,6 +1,7 @@
5200 #ifndef _ASM_IA64_CACHE_H
5201 #define _ASM_IA64_CACHE_H
5202
5203+#include <linux/const.h>
5204
5205 /*
5206 * Copyright (C) 1998-2000 Hewlett-Packard Co
5207@@ -9,7 +10,7 @@
5208
5209 /* Bytes per L1 (data) cache line. */
5210 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5211-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5213
5214 #ifdef CONFIG_SMP
5215 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5216diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5217index 5a83c5c..4d7f553 100644
5218--- a/arch/ia64/include/asm/elf.h
5219+++ b/arch/ia64/include/asm/elf.h
5220@@ -42,6 +42,13 @@
5221 */
5222 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5223
5224+#ifdef CONFIG_PAX_ASLR
5225+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5226+
5227+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5228+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5229+#endif
5230+
5231 #define PT_IA_64_UNWIND 0x70000001
5232
5233 /* IA-64 relocations: */
5234diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5235index 5767cdf..7462574 100644
5236--- a/arch/ia64/include/asm/pgalloc.h
5237+++ b/arch/ia64/include/asm/pgalloc.h
5238@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5239 pgd_val(*pgd_entry) = __pa(pud);
5240 }
5241
5242+static inline void
5243+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5244+{
5245+ pgd_populate(mm, pgd_entry, pud);
5246+}
5247+
5248 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5249 {
5250 return quicklist_alloc(0, GFP_KERNEL, NULL);
5251@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5252 pud_val(*pud_entry) = __pa(pmd);
5253 }
5254
5255+static inline void
5256+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5257+{
5258+ pud_populate(mm, pud_entry, pmd);
5259+}
5260+
5261 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5262 {
5263 return quicklist_alloc(0, GFP_KERNEL, NULL);
5264diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5265index 7935115..c0eca6a 100644
5266--- a/arch/ia64/include/asm/pgtable.h
5267+++ b/arch/ia64/include/asm/pgtable.h
5268@@ -12,7 +12,7 @@
5269 * David Mosberger-Tang <davidm@hpl.hp.com>
5270 */
5271
5272-
5273+#include <linux/const.h>
5274 #include <asm/mman.h>
5275 #include <asm/page.h>
5276 #include <asm/processor.h>
5277@@ -142,6 +142,17 @@
5278 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5279 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5280 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5281+
5282+#ifdef CONFIG_PAX_PAGEEXEC
5283+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5284+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5285+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5286+#else
5287+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5288+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5289+# define PAGE_COPY_NOEXEC PAGE_COPY
5290+#endif
5291+
5292 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5293 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5294 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5295diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5296index 45698cd..e8e2dbc 100644
5297--- a/arch/ia64/include/asm/spinlock.h
5298+++ b/arch/ia64/include/asm/spinlock.h
5299@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5300 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5301
5302 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5303- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5304+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5305 }
5306
5307 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5308diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5309index 103bedc..0210597 100644
5310--- a/arch/ia64/include/asm/uaccess.h
5311+++ b/arch/ia64/include/asm/uaccess.h
5312@@ -70,6 +70,7 @@
5313 && ((segment).seg == KERNEL_DS.seg \
5314 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5315 })
5316+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5317 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5318
5319 /*
5320@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5321 static inline unsigned long
5322 __copy_to_user (void __user *to, const void *from, unsigned long count)
5323 {
5324+ if (count > INT_MAX)
5325+ return count;
5326+
5327+ if (!__builtin_constant_p(count))
5328+ check_object_size(from, count, true);
5329+
5330 return __copy_user(to, (__force void __user *) from, count);
5331 }
5332
5333 static inline unsigned long
5334 __copy_from_user (void *to, const void __user *from, unsigned long count)
5335 {
5336+ if (count > INT_MAX)
5337+ return count;
5338+
5339+ if (!__builtin_constant_p(count))
5340+ check_object_size(to, count, false);
5341+
5342 return __copy_user((__force void __user *) to, from, count);
5343 }
5344
5345@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5346 ({ \
5347 void __user *__cu_to = (to); \
5348 const void *__cu_from = (from); \
5349- long __cu_len = (n); \
5350+ unsigned long __cu_len = (n); \
5351 \
5352- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5353+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5354+ if (!__builtin_constant_p(n)) \
5355+ check_object_size(__cu_from, __cu_len, true); \
5356 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5357+ } \
5358 __cu_len; \
5359 })
5360
5361@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5362 ({ \
5363 void *__cu_to = (to); \
5364 const void __user *__cu_from = (from); \
5365- long __cu_len = (n); \
5366+ unsigned long __cu_len = (n); \
5367 \
5368 __chk_user_ptr(__cu_from); \
5369- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5370+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5371+ if (!__builtin_constant_p(n)) \
5372+ check_object_size(__cu_to, __cu_len, false); \
5373 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5374+ } \
5375 __cu_len; \
5376 })
5377
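
Two hardenings are layered onto the ia64 copy helpers above: lengths above INT_MAX are rejected outright, and runtime-sized copies go through check_object_size() (PAX_USERCOPY), which verifies that the kernel-side buffer really spans the requested count (the bool argument is the direction, true when copying out to userland). The INT_MAX clamp targets sign bugs in callers; a worked example of what it catches:

	long user_len = 8, hdr_len = 16;
	unsigned long count = user_len - hdr_len; /* -8 wraps to 0xff...f8 */
	/* before: __copy_user() with a near-2^64 count, i.e. a massive
	 * kernel memory disclosure; after: count > INT_MAX, so the copy
	 * is refused and 'count' is returned as "bytes not copied". */
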
5378diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5379index 29754aa..06d2838 100644
5380--- a/arch/ia64/kernel/module.c
5381+++ b/arch/ia64/kernel/module.c
5382@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5383 }
5384
5385 static inline int
5386+in_init_rx (const struct module *mod, uint64_t addr)
5387+{
5388+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5389+}
5390+
5391+static inline int
5392+in_init_rw (const struct module *mod, uint64_t addr)
5393+{
5394+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5395+}
5396+
5397+static inline int
5398 in_init (const struct module *mod, uint64_t addr)
5399 {
5400- return addr - (uint64_t) mod->module_init < mod->init_size;
5401+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5402+}
5403+
5404+static inline int
5405+in_core_rx (const struct module *mod, uint64_t addr)
5406+{
5407+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5408+}
5409+
5410+static inline int
5411+in_core_rw (const struct module *mod, uint64_t addr)
5412+{
5413+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5414 }
5415
5416 static inline int
5417 in_core (const struct module *mod, uint64_t addr)
5418 {
5419- return addr - (uint64_t) mod->module_core < mod->core_size;
5420+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5421 }
5422
5423 static inline int
5424@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5425 break;
5426
5427 case RV_BDREL:
5428- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5429+ if (in_init_rx(mod, val))
5430+ val -= (uint64_t) mod->module_init_rx;
5431+ else if (in_init_rw(mod, val))
5432+ val -= (uint64_t) mod->module_init_rw;
5433+ else if (in_core_rx(mod, val))
5434+ val -= (uint64_t) mod->module_core_rx;
5435+ else if (in_core_rw(mod, val))
5436+ val -= (uint64_t) mod->module_core_rw;
5437 break;
5438
5439 case RV_LTV:
5440@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5441 * addresses have been selected...
5442 */
5443 uint64_t gp;
5444- if (mod->core_size > MAX_LTOFF)
5445+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5446 /*
5447 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5448 * at the end of the module.
5449 */
5450- gp = mod->core_size - MAX_LTOFF / 2;
5451+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5452 else
5453- gp = mod->core_size / 2;
5454- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5455+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5456+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5457 mod->arch.gp = gp;
5458 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5459 }
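
Under this patch's module layout, the single module_core/module_init region is split into rx (text, rodata) and rw (data, bss) halves with separate sizes, so module text is never mapped writable; in_init()/in_core() and the GP placement above are accordingly rewritten as unions of the two ranges. The membership tests rely on the usual unsigned-wrap idiom:

	/* addr - base < size is a one-comparison range check: if addr < base
	 * the subtraction wraps to a huge value and the '<' fails, covering
	 * both bounds at once. */
	static inline int in_region(unsigned long addr, unsigned long base,
				    unsigned long size)
	{
		return addr - base < size;
	}
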
5460diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5461index c39c3cd..3c77738 100644
5462--- a/arch/ia64/kernel/palinfo.c
5463+++ b/arch/ia64/kernel/palinfo.c
5464@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5465 return NOTIFY_OK;
5466 }
5467
5468-static struct notifier_block __refdata palinfo_cpu_notifier =
5469+static struct notifier_block palinfo_cpu_notifier =
5470 {
5471 .notifier_call = palinfo_cpu_callback,
5472 .priority = 0,
5473diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5474index 41e33f8..65180b2a 100644
5475--- a/arch/ia64/kernel/sys_ia64.c
5476+++ b/arch/ia64/kernel/sys_ia64.c
5477@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5478 unsigned long align_mask = 0;
5479 struct mm_struct *mm = current->mm;
5480 struct vm_unmapped_area_info info;
5481+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5482
5483 if (len > RGN_MAP_LIMIT)
5484 return -ENOMEM;
5485@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5486 if (REGION_NUMBER(addr) == RGN_HPAGE)
5487 addr = 0;
5488 #endif
5489+
5490+#ifdef CONFIG_PAX_RANDMMAP
5491+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5492+ addr = mm->free_area_cache;
5493+ else
5494+#endif
5495+
5496 if (!addr)
5497 addr = TASK_UNMAPPED_BASE;
5498
5499@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5500 info.high_limit = TASK_SIZE;
5501 info.align_mask = align_mask;
5502 info.align_offset = 0;
5503+ info.threadstack_offset = offset;
5504 return vm_unmapped_area(&info);
5505 }
5506
5507diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5508index 84f8a52..7c76178 100644
5509--- a/arch/ia64/kernel/vmlinux.lds.S
5510+++ b/arch/ia64/kernel/vmlinux.lds.S
5511@@ -192,7 +192,7 @@ SECTIONS {
5512 /* Per-cpu data: */
5513 . = ALIGN(PERCPU_PAGE_SIZE);
5514 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5515- __phys_per_cpu_start = __per_cpu_load;
5516+ __phys_per_cpu_start = per_cpu_load;
5517 /*
5518 * ensure percpu data fits
5519 * into percpu page size
5520diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5521index ba5ba7a..36e9d3a 100644
5522--- a/arch/ia64/mm/fault.c
5523+++ b/arch/ia64/mm/fault.c
5524@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5525 return pte_present(pte);
5526 }
5527
5528+#ifdef CONFIG_PAX_PAGEEXEC
5529+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5530+{
5531+ unsigned long i;
5532+
5533+ printk(KERN_ERR "PAX: bytes at PC: ");
5534+ for (i = 0; i < 8; i++) {
5535+ unsigned int c;
5536+ if (get_user(c, (unsigned int *)pc+i))
5537+ printk(KERN_CONT "???????? ");
5538+ else
5539+ printk(KERN_CONT "%08x ", c);
5540+ }
5541+ printk("\n");
5542+}
5543+#endif
5544+
5545 # define VM_READ_BIT 0
5546 # define VM_WRITE_BIT 1
5547 # define VM_EXEC_BIT 2
5548@@ -151,8 +168,21 @@ retry:
5549 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5550 goto bad_area;
5551
5552- if ((vma->vm_flags & mask) != mask)
5553+ if ((vma->vm_flags & mask) != mask) {
5554+
5555+#ifdef CONFIG_PAX_PAGEEXEC
5556+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5557+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5558+ goto bad_area;
5559+
5560+ up_read(&mm->mmap_sem);
5561+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5562+ do_group_exit(SIGKILL);
5563+ }
5564+#endif
5565+
5566 goto bad_area;
5567+ }
5568
5569 /*
5570 * If for any reason at all we couldn't handle the fault, make
5571diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5572index 76069c1..c2aa816 100644
5573--- a/arch/ia64/mm/hugetlbpage.c
5574+++ b/arch/ia64/mm/hugetlbpage.c
5575@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5576 unsigned long pgoff, unsigned long flags)
5577 {
5578 struct vm_unmapped_area_info info;
5579+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5580
5581 if (len > RGN_MAP_LIMIT)
5582 return -ENOMEM;
5583@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5584 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5585 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5586 info.align_offset = 0;
5587+ info.threadstack_offset = offset;
5588 return vm_unmapped_area(&info);
5589 }
5590
5591diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5592index 6b33457..88b5124 100644
5593--- a/arch/ia64/mm/init.c
5594+++ b/arch/ia64/mm/init.c
5595@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5596 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5597 vma->vm_end = vma->vm_start + PAGE_SIZE;
5598 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5599+
5600+#ifdef CONFIG_PAX_PAGEEXEC
5601+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5602+ vma->vm_flags &= ~VM_EXEC;
5603+
5604+#ifdef CONFIG_PAX_MPROTECT
5605+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5606+ vma->vm_flags &= ~VM_MAYEXEC;
5607+#endif
5608+
5609+ }
5610+#endif
5611+
5612 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5613 down_write(&current->mm->mmap_sem);
5614 if (insert_vm_struct(current->mm, vma)) {
5615@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5616 gate_vma.vm_start = FIXADDR_USER_START;
5617 gate_vma.vm_end = FIXADDR_USER_END;
5618 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5619- gate_vma.vm_page_prot = __P101;
5620+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5621
5622 return 0;
5623 }
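
Clearing VM_EXEC strips execute permission from the ia64 register-backing-store VMA; clearing VM_MAYEXEC as well (under PAX_MPROTECT) makes that permanent, because mprotect() refuses any permission whose VM_MAY* bit is absent. Sketch of the mainline mm/mprotect.c check that enforces this (VM_MAYREAD/WRITE/EXEC sit four bits above VM_READ/WRITE/EXEC):

	if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC))
		return -EACCES;	/* e.g. PROT_EXEC asked for, VM_MAYEXEC gone */
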
5624diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5625index 40b3ee98..8c2c112 100644
5626--- a/arch/m32r/include/asm/cache.h
5627+++ b/arch/m32r/include/asm/cache.h
5628@@ -1,8 +1,10 @@
5629 #ifndef _ASM_M32R_CACHE_H
5630 #define _ASM_M32R_CACHE_H
5631
5632+#include <linux/const.h>
5633+
5634 /* L1 cache line size */
5635 #define L1_CACHE_SHIFT 4
5636-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5637+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5638
5639 #endif /* _ASM_M32R_CACHE_H */
5640diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5641index 82abd15..d95ae5d 100644
5642--- a/arch/m32r/lib/usercopy.c
5643+++ b/arch/m32r/lib/usercopy.c
5644@@ -14,6 +14,9 @@
5645 unsigned long
5646 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5647 {
5648+ if ((long)n < 0)
5649+ return n;
5650+
5651 prefetch(from);
5652 if (access_ok(VERIFY_WRITE, to, n))
5653 __copy_user(to,from,n);
5654@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5655 unsigned long
5656 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5657 {
5658+ if ((long)n < 0)
5659+ return n;
5660+
5661 prefetchw(to);
5662 if (access_ok(VERIFY_READ, from, n))
5663 __copy_user_zeroing(to,from,n);
5664diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5665index 0395c51..5f26031 100644
5666--- a/arch/m68k/include/asm/cache.h
5667+++ b/arch/m68k/include/asm/cache.h
5668@@ -4,9 +4,11 @@
5669 #ifndef __ARCH_M68K_CACHE_H
5670 #define __ARCH_M68K_CACHE_H
5671
5672+#include <linux/const.h>
5673+
5674 /* bytes per L1 cache line */
5675 #define L1_CACHE_SHIFT 4
5676-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5677+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5678
5679 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5680
5681diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5682index d703d8e..a8e2d70 100644
5683--- a/arch/metag/include/asm/barrier.h
5684+++ b/arch/metag/include/asm/barrier.h
5685@@ -90,7 +90,7 @@ static inline void fence(void)
5686 do { \
5687 compiletime_assert_atomic_type(*p); \
5688 smp_mb(); \
5689- ACCESS_ONCE(*p) = (v); \
5690+ ACCESS_ONCE_RW(*p) = (v); \
5691 } while (0)
5692
5693 #define smp_load_acquire(p) \
5694diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5695index 3c32075..ae0ae75 100644
5696--- a/arch/metag/mm/hugetlbpage.c
5697+++ b/arch/metag/mm/hugetlbpage.c
5698@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5699 info.high_limit = TASK_SIZE;
5700 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5701 info.align_offset = 0;
5702+ info.threadstack_offset = 0;
5703 return vm_unmapped_area(&info);
5704 }
5705
5706diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5707index 4efe96a..60e8699 100644
5708--- a/arch/microblaze/include/asm/cache.h
5709+++ b/arch/microblaze/include/asm/cache.h
5710@@ -13,11 +13,12 @@
5711 #ifndef _ASM_MICROBLAZE_CACHE_H
5712 #define _ASM_MICROBLAZE_CACHE_H
5713
5714+#include <linux/const.h>
5715 #include <asm/registers.h>
5716
5717 #define L1_CACHE_SHIFT 5
5718 /* word-granular cache in microblaze */
5719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5721
5722 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5723
5724diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5725index 843713c..b6a87b9 100644
5726--- a/arch/mips/Kconfig
5727+++ b/arch/mips/Kconfig
5728@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5729
5730 config KEXEC
5731 bool "Kexec system call"
5732+ depends on !GRKERNSEC_KMEM
5733 help
5734 kexec is a system call that implements the ability to shutdown your
5735 current kernel, and to start another kernel. It is like a reboot
5736diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5737index 3778655..1dff0a9 100644
5738--- a/arch/mips/cavium-octeon/dma-octeon.c
5739+++ b/arch/mips/cavium-octeon/dma-octeon.c
5740@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5741 if (dma_release_from_coherent(dev, order, vaddr))
5742 return;
5743
5744- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5745+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5746 }
5747
5748 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5749diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5750index 857da84..3f4458b 100644
5751--- a/arch/mips/include/asm/atomic.h
5752+++ b/arch/mips/include/asm/atomic.h
5753@@ -22,15 +22,39 @@
5754 #include <asm/cmpxchg.h>
5755 #include <asm/war.h>
5756
5757+#ifdef CONFIG_GENERIC_ATOMIC64
5758+#include <asm-generic/atomic64.h>
5759+#endif
5760+
5761 #define ATOMIC_INIT(i) { (i) }
5762
5763+#ifdef CONFIG_64BIT
5764+#define _ASM_EXTABLE(from, to) \
5765+" .section __ex_table,\"a\"\n" \
5766+" .dword " #from ", " #to"\n" \
5767+" .previous\n"
5768+#else
5769+#define _ASM_EXTABLE(from, to) \
5770+" .section __ex_table,\"a\"\n" \
5771+" .word " #from ", " #to"\n" \
5772+" .previous\n"
5773+#endif
5774+
5775 /*
5776 * atomic_read - read atomic variable
5777 * @v: pointer of type atomic_t
5778 *
5779 * Atomically reads the value of @v.
5780 */
5781-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5782+static inline int atomic_read(const atomic_t *v)
5783+{
5784+ return ACCESS_ONCE(v->counter);
5785+}
5786+
5787+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5788+{
5789+ return ACCESS_ONCE(v->counter);
5790+}
5791
5792 /*
5793 * atomic_set - set atomic variable
5794@@ -39,47 +63,77 @@
5795 *
5796 * Atomically sets the value of @v to @i.
5797 */
5798-#define atomic_set(v, i) ((v)->counter = (i))
5799+static inline void atomic_set(atomic_t *v, int i)
5800+{
5801+ v->counter = i;
5802+}
5803
5804-#define ATOMIC_OP(op, c_op, asm_op) \
5805-static __inline__ void atomic_##op(int i, atomic_t * v) \
5806+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5807+{
5808+ v->counter = i;
5809+}
5810+
5811+#ifdef CONFIG_PAX_REFCOUNT
5812+#define __OVERFLOW_POST \
5813+ " b 4f \n" \
5814+ " .set noreorder \n" \
5815+ "3: b 5f \n" \
5816+ " move %0, %1 \n" \
5817+ " .set reorder \n"
5818+#define __OVERFLOW_EXTABLE \
5819+ "3:\n" \
5820+ _ASM_EXTABLE(2b, 3b)
5821+#else
5822+#define __OVERFLOW_POST
5823+#define __OVERFLOW_EXTABLE
5824+#endif
5825+
5826+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5827+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5828 { \
5829 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5830 int temp; \
5831 \
5832 __asm__ __volatile__( \
5833- " .set arch=r4000 \n" \
5834- "1: ll %0, %1 # atomic_" #op " \n" \
5835- " " #asm_op " %0, %2 \n" \
5836+ " .set mips3 \n" \
5837+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5838+ "2: " #asm_op " %0, %2 \n" \
5839 " sc %0, %1 \n" \
5840 " beqzl %0, 1b \n" \
5841+ extable \
5842 " .set mips0 \n" \
5843 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5844 : "Ir" (i)); \
5845 } else if (kernel_uses_llsc) { \
5846 int temp; \
5847 \
5848- do { \
5849- __asm__ __volatile__( \
5850- " .set arch=r4000 \n" \
5851- " ll %0, %1 # atomic_" #op "\n" \
5852- " " #asm_op " %0, %2 \n" \
5853- " sc %0, %1 \n" \
5854- " .set mips0 \n" \
5855- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5856- : "Ir" (i)); \
5857- } while (unlikely(!temp)); \
5858+ __asm__ __volatile__( \
5859+ " .set mips3 \n" \
5860+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5861+ "2: " #asm_op " %0, %2 \n" \
5862+ " sc %0, %1 \n" \
5863+ " beqz %0, 1b \n" \
5864+ extable \
5865+ " .set mips0 \n" \
5866+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5867+ : "Ir" (i)); \
5868 } else { \
5869 unsigned long flags; \
5870 \
5871 raw_local_irq_save(flags); \
5872- v->counter c_op i; \
5873+ __asm__ __volatile__( \
5874+ "2: " #asm_op " %0, %1 \n" \
5875+ extable \
5876+ : "+r" (v->counter) : "Ir" (i)); \
5877 raw_local_irq_restore(flags); \
5878 } \
5879 }
5880
5881-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5882-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5883+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op##u, ) \
5884+ __ATOMIC_OP(op, _unchecked, asm_op, __OVERFLOW_EXTABLE)
5885+
5886+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5887+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5888 { \
5889 int result; \
5890 \
5891@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5892 int temp; \
5893 \
5894 __asm__ __volatile__( \
5895- " .set arch=r4000 \n" \
5896- "1: ll %1, %2 # atomic_" #op "_return \n" \
5897- " " #asm_op " %0, %1, %3 \n" \
5898+ " .set mips3 \n" \
5899+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5900+ "2: " #asm_op " %0, %1, %3 \n" \
5901 " sc %0, %2 \n" \
5902 " beqzl %0, 1b \n" \
5903- " " #asm_op " %0, %1, %3 \n" \
5904+ post_op \
5905+ extable \
5906+ "4: " #asm_op " %0, %1, %3 \n" \
5907+ "5: \n" \
5908 " .set mips0 \n" \
5909 : "=&r" (result), "=&r" (temp), \
5910 "+" GCC_OFF12_ASM() (v->counter) \
5911@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5912 } else if (kernel_uses_llsc) { \
5913 int temp; \
5914 \
5915- do { \
5916- __asm__ __volatile__( \
5917- " .set arch=r4000 \n" \
5918- " ll %1, %2 # atomic_" #op "_return \n" \
5919- " " #asm_op " %0, %1, %3 \n" \
5920- " sc %0, %2 \n" \
5921- " .set mips0 \n" \
5922- : "=&r" (result), "=&r" (temp), \
5923- "+" GCC_OFF12_ASM() (v->counter) \
5924- : "Ir" (i)); \
5925- } while (unlikely(!result)); \
5926+ __asm__ __volatile__( \
5927+ " .set mips3 \n" \
5928+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5929+ "2: " #asm_op " %0, %1, %3 \n" \
5930+ " sc %0, %2 \n" \
5931+ post_op \
5932+ extable \
5933+ "4: " #asm_op " %0, %1, %3 \n" \
5934+ "5: \n" \
5935+ " .set mips0 \n" \
5936+ : "=&r" (result), "=&r" (temp), \
5937+ "+" GCC_OFF12_ASM() (v->counter) \
5938+ : "Ir" (i)); \
5939 \
5940 result = temp; result c_op i; \
5941 } else { \
5942 unsigned long flags; \
5943 \
5944 raw_local_irq_save(flags); \
5945- result = v->counter; \
5946- result c_op i; \
5947- v->counter = result; \
5948+ __asm__ __volatile__( \
5949+ " lw %0, %1 \n" \
5950+ "2: " #asm_op " %0, %1, %2 \n" \
5951+ " sw %0, %1 \n" \
5952+ "3: \n" \
5953+ extable \
5954+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5955+ : "Ir" (i)); \
5956 raw_local_irq_restore(flags); \
5957 } \
5958 \
5959@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5960 return result; \
5961 }
5962
5963-#define ATOMIC_OPS(op, c_op, asm_op) \
5964- ATOMIC_OP(op, c_op, asm_op) \
5965- ATOMIC_OP_RETURN(op, c_op, asm_op)
5966+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op##u, , __OVERFLOW_EXTABLE) \
5967+ __ATOMIC_OP_RETURN(op, _unchecked, asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5968
5969-ATOMIC_OPS(add, +=, addu)
5970-ATOMIC_OPS(sub, -=, subu)
5971+#define ATOMIC_OPS(op, asm_op) \
5972+ ATOMIC_OP(op, asm_op) \
5973+ ATOMIC_OP_RETURN(op, asm_op)
5974+
5975+ATOMIC_OPS(add, add)
5976+ATOMIC_OPS(sub, sub)
5977
5978 #undef ATOMIC_OPS
5979 #undef ATOMIC_OP_RETURN
5980+#undef __ATOMIC_OP_RETURN
5981 #undef ATOMIC_OP
5982+#undef __ATOMIC_OP
5983
5984 /*
5985 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5986@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5987 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5988 * The function returns the old value of @v minus @i.
5989 */
5990-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5991+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5992 {
5993 int result;
5994
5995@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5996 return result;
5997 }
5998
5999-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6000-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6001+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6002+{
6003+ return cmpxchg(&v->counter, old, new);
6004+}
6005+
6006+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6007+ int new)
6008+{
6009+ return cmpxchg(&(v->counter), old, new);
6010+}
6011+
6012+static inline int atomic_xchg(atomic_t *v, int new)
6013+{
6014+ return xchg(&v->counter, new);
6015+}
6016+
6017+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6018+{
6019+ return xchg(&(v->counter), new);
6020+}
6021
6022 /**
6023 * __atomic_add_unless - add unless the number is a given value
6024@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6025
6026 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6027 #define atomic_inc_return(v) atomic_add_return(1, (v))
6028+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6029+{
6030+ return atomic_add_return_unchecked(1, v);
6031+}
6032
6033 /*
6034 * atomic_sub_and_test - subtract value from variable and test result
6035@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6036 * other cases.
6037 */
6038 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6039+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6040+{
6041+ return atomic_add_return_unchecked(1, v) == 0;
6042+}
6043
6044 /*
6045 * atomic_dec_and_test - decrement by 1 and test
6046@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6047 * Atomically increments @v by 1.
6048 */
6049 #define atomic_inc(v) atomic_add(1, (v))
6050+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6051+{
6052+ atomic_add_unchecked(1, v);
6053+}
6054
6055 /*
6056 * atomic_dec - decrement and test
6057@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6058 * Atomically decrements @v by 1.
6059 */
6060 #define atomic_dec(v) atomic_sub(1, (v))
6061+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6062+{
6063+ atomic_sub_unchecked(1, v);
6064+}
6065
6066 /*
6067 * atomic_add_negative - add and test if negative
6068@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6069 * @v: pointer of type atomic64_t
6070 *
6071 */
6072-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6073+static inline long atomic64_read(const atomic64_t *v)
6074+{
6075+ return ACCESS_ONCE(v->counter);
6076+}
6077+
6078+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6079+{
6080+ return ACCESS_ONCE(v->counter);
6081+}
6082
6083 /*
6084 * atomic64_set - set atomic variable
6085 * @v: pointer of type atomic64_t
6086 * @i: required value
6087 */
6088-#define atomic64_set(v, i) ((v)->counter = (i))
6089+static inline void atomic64_set(atomic64_t *v, long i)
6090+{
6091+ v->counter = i;
6092+}
6093
6094-#define ATOMIC64_OP(op, c_op, asm_op) \
6095-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6096+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6097+{
6098+ v->counter = i;
6099+}
6100+
6101+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6102+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6103 { \
6104 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6105 long temp; \
6106 \
6107 __asm__ __volatile__( \
6108- " .set arch=r4000 \n" \
6109- "1: lld %0, %1 # atomic64_" #op " \n" \
6110- " " #asm_op " %0, %2 \n" \
6111+ " .set mips3 \n" \
6112+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6113+ "2: " #asm_op " %0, %2 \n" \
6114 " scd %0, %1 \n" \
6115 " beqzl %0, 1b \n" \
6116+ extable \
6117 " .set mips0 \n" \
6118 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6119 : "Ir" (i)); \
6120 } else if (kernel_uses_llsc) { \
6121 long temp; \
6122 \
6123- do { \
6124- __asm__ __volatile__( \
6125- " .set arch=r4000 \n" \
6126- " lld %0, %1 # atomic64_" #op "\n" \
6127- " " #asm_op " %0, %2 \n" \
6128- " scd %0, %1 \n" \
6129- " .set mips0 \n" \
6130- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6131- : "Ir" (i)); \
6132- } while (unlikely(!temp)); \
6133+ __asm__ __volatile__( \
6134+ " .set mips3 \n" \
6135+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6136+ "2: " #asm_op " %0, %2 \n" \
6137+ " scd %0, %1 \n" \
6138+ " beqz %0, 1b \n" \
6139+ extable \
6140+ " .set mips0 \n" \
6141+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6142+ : "Ir" (i)); \
6143 } else { \
6144 unsigned long flags; \
6145 \
6146 raw_local_irq_save(flags); \
6147- v->counter c_op i; \
6148+ __asm__ __volatile__( \
6149+ "2: " #asm_op " %0, %1 \n" \
6150+ extable \
6151+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6152 raw_local_irq_restore(flags); \
6153 } \
6154 }
6155
6156-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6157-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6158+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op##u, ) \
6159+ __ATOMIC64_OP(op, _unchecked, asm_op, __OVERFLOW_EXTABLE)
6160+
6161+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6162+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6163 { \
6164 long result; \
6165 \
6166@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6167 long temp; \
6168 \
6169 __asm__ __volatile__( \
6170- " .set arch=r4000 \n" \
6171+ " .set mips3 \n" \
6172 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6173- " " #asm_op " %0, %1, %3 \n" \
6174+ "2: " #asm_op " %0, %1, %3 \n" \
6175 " scd %0, %2 \n" \
6176 " beqzl %0, 1b \n" \
6177- " " #asm_op " %0, %1, %3 \n" \
6178+ post_op \
6179+ extable \
6180+ "4: " #asm_op " %0, %1, %3 \n" \
6181+ "5: \n" \
6182 " .set mips0 \n" \
6183 : "=&r" (result), "=&r" (temp), \
6184 "+" GCC_OFF12_ASM() (v->counter) \
6185@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6186 } else if (kernel_uses_llsc) { \
6187 long temp; \
6188 \
6189- do { \
6190- __asm__ __volatile__( \
6191- " .set arch=r4000 \n" \
6192- " lld %1, %2 # atomic64_" #op "_return\n" \
6193- " " #asm_op " %0, %1, %3 \n" \
6194- " scd %0, %2 \n" \
6195- " .set mips0 \n" \
6196- : "=&r" (result), "=&r" (temp), \
6197- "=" GCC_OFF12_ASM() (v->counter) \
6198- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6199- : "memory"); \
6200- } while (unlikely(!result)); \
6201+ __asm__ __volatile__( \
6202+ " .set mips3 \n" \
6203+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6204+ "2: " #asm_op " %0, %1, %3 \n" \
6205+ " scd %0, %2 \n" \
6206+ " beqz %0, 1b \n" \
6207+ post_op \
6208+ extable \
6209+ "4: " #asm_op " %0, %1, %3 \n" \
6210+ "5: \n" \
6211+ " .set mips0 \n" \
6212+ : "=&r" (result), "=&r" (temp), \
6213+ "=" GCC_OFF12_ASM() (v->counter) \
6214+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6215+ : "memory"); \
6216 \
6217 result = temp; result c_op i; \
6218 } else { \
6219 unsigned long flags; \
6220 \
6221 raw_local_irq_save(flags); \
6222- result = v->counter; \
6223- result c_op i; \
6224- v->counter = result; \
6225+ __asm__ __volatile__( \
6226+ " ld %0, %1 \n" \
6227+ "2: " #asm_op " %0, %1, %2 \n" \
6228+ " sd %0, %1 \n" \
6229+ "3: \n" \
6230+ extable \
6231+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6232+ : "Ir" (i)); \
6233 raw_local_irq_restore(flags); \
6234 } \
6235 \
6236@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6237 return result; \
6238 }
6239
6240-#define ATOMIC64_OPS(op, c_op, asm_op) \
6241- ATOMIC64_OP(op, c_op, asm_op) \
6242- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6243+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op##u, , __OVERFLOW_EXTABLE) \
6244+ __ATOMIC64_OP_RETURN(op, _unchecked, asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6245
6246-ATOMIC64_OPS(add, +=, daddu)
6247-ATOMIC64_OPS(sub, -=, dsubu)
6248+#define ATOMIC64_OPS(op, asm_op) \
6249+ ATOMIC64_OP(op, asm_op) \
6250+ ATOMIC64_OP_RETURN(op, asm_op)
6251+
6252+ATOMIC64_OPS(add, dadd)
6253+ATOMIC64_OPS(sub, dsub)
6254
6255 #undef ATOMIC64_OPS
6256 #undef ATOMIC64_OP_RETURN
6257+#undef __ATOMIC64_OP_RETURN
6258 #undef ATOMIC64_OP
6259+#undef __ATOMIC64_OP
6260+#undef __OVERFLOW_EXTABLE
6261+#undef __OVERFLOW_POST
6262
6263 /*
6264 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6265@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6266 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6267 * The function returns the old value of @v minus @i.
6268 */
6269-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6270+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6271 {
6272 long result;
6273
6274@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6275 return result;
6276 }
6277
6278-#define atomic64_cmpxchg(v, o, n) \
6279- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6280-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6281+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6282+{
6283+ return cmpxchg(&v->counter, old, new);
6284+}
6285+
6286+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6287+ long new)
6288+{
6289+ return cmpxchg(&(v->counter), old, new);
6290+}
6291+
6292+static inline long atomic64_xchg(atomic64_t *v, long new)
6293+{
6294+ return xchg(&v->counter, new);
6295+}
6296+
6297+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6298+{
6299+ return xchg(&(v->counter), new);
6300+}
6301
6302 /**
6303 * atomic64_add_unless - add unless the number is a given value
6304@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305
6306 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6307 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6308+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6309
6310 /*
6311 * atomic64_sub_and_test - subtract value from variable and test result
6312@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * other cases.
6314 */
6315 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6316+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6317
6318 /*
6319 * atomic64_dec_and_test - decrement by 1 and test
6320@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically increments @v by 1.
6322 */
6323 #define atomic64_inc(v) atomic64_add(1, (v))
6324+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_dec - decrement and test
6328@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6329 * Atomically decrements @v by 1.
6330 */
6331 #define atomic64_dec(v) atomic64_sub(1, (v))
6332+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6333
6334 /*
6335 * atomic64_add_negative - add and test if negative
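
The __OVERFLOW_POST/__OVERFLOW_EXTABLE scaffolding above is the MIPS incarnation of PAX_REFCOUNT: the arithmetic instruction sits at local label 2 and, with the feature enabled, the signed (trapping) add/dadd form is used, so a wrapping reference count raises an overflow exception instead of silently going negative. The trap handler finds the _ASM_EXTABLE(2b, 3b) entry and resumes at label 3, whose fixup skips the store-conditional and hands back the pre-add value. One atomic add-return, sketched:

	1:	ll	temp, counter		# load-linked
	2:	add	res, temp, i		# signed add: traps on overflow
		sc	res, counter		# store-conditional
		beqz	res, 1b			# retry if ll/sc raced
		b	4f			# __OVERFLOW_POST: normal path
	3:	b	5f			# fixup target via _ASM_EXTABLE(2b,3b)
		move	res, temp		# (delay slot) report old value
	4:	add	res, temp, i		# recompute the return value
	5:					# done
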
6336diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6337index 2b8bbbc..4556df6 100644
6338--- a/arch/mips/include/asm/barrier.h
6339+++ b/arch/mips/include/asm/barrier.h
6340@@ -133,7 +133,7 @@
6341 do { \
6342 compiletime_assert_atomic_type(*p); \
6343 smp_mb(); \
6344- ACCESS_ONCE(*p) = (v); \
6345+ ACCESS_ONCE_RW(*p) = (v); \
6346 } while (0)
6347
6348 #define smp_load_acquire(p) \
6349diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6350index b4db69f..8f3b093 100644
6351--- a/arch/mips/include/asm/cache.h
6352+++ b/arch/mips/include/asm/cache.h
6353@@ -9,10 +9,11 @@
6354 #ifndef _ASM_CACHE_H
6355 #define _ASM_CACHE_H
6356
6357+#include <linux/const.h>
6358 #include <kmalloc.h>
6359
6360 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6361-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6362+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6363
6364 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6365 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6366diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6367index eb4d95d..f2f7f93 100644
6368--- a/arch/mips/include/asm/elf.h
6369+++ b/arch/mips/include/asm/elf.h
6370@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6371 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6372 #endif
6373
6374+#ifdef CONFIG_PAX_ASLR
6375+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6376+
6377+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6378+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6379+#endif
6380+
6381 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6382 struct linux_binprm;
6383 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6384 int uses_interp);
6385
6386-struct mm_struct;
6387-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6388-#define arch_randomize_brk arch_randomize_brk
6389-
6390 struct arch_elf_state {
6391 int fp_abi;
6392 int interp_fp_abi;
6393diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6394index c1f6afa..38cc6e9 100644
6395--- a/arch/mips/include/asm/exec.h
6396+++ b/arch/mips/include/asm/exec.h
6397@@ -12,6 +12,6 @@
6398 #ifndef _ASM_EXEC_H
6399 #define _ASM_EXEC_H
6400
6401-extern unsigned long arch_align_stack(unsigned long sp);
6402+#define arch_align_stack(x) ((x) & ~0xfUL)
6403
6404 #endif /* _ASM_EXEC_H */
6405diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6406index 9e8ef59..1139d6b 100644
6407--- a/arch/mips/include/asm/hw_irq.h
6408+++ b/arch/mips/include/asm/hw_irq.h
6409@@ -10,7 +10,7 @@
6410
6411 #include <linux/atomic.h>
6412
6413-extern atomic_t irq_err_count;
6414+extern atomic_unchecked_t irq_err_count;
6415
6416 /*
6417 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6418diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6419index 46dfc3c..a16b13a 100644
6420--- a/arch/mips/include/asm/local.h
6421+++ b/arch/mips/include/asm/local.h
6422@@ -12,15 +12,25 @@ typedef struct
6423 atomic_long_t a;
6424 } local_t;
6425
6426+typedef struct {
6427+ atomic_long_unchecked_t a;
6428+} local_unchecked_t;
6429+
6430 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6431
6432 #define local_read(l) atomic_long_read(&(l)->a)
6433+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6434 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6435+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6436
6437 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6438+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6439 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6440+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6441 #define local_inc(l) atomic_long_inc(&(l)->a)
6442+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6443 #define local_dec(l) atomic_long_dec(&(l)->a)
6444+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6445
6446 /*
6447 * Same as above, but return the result value
6448@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6449 return result;
6450 }
6451
6452+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6453+{
6454+ unsigned long result;
6455+
6456+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6457+ unsigned long temp;
6458+
6459+ __asm__ __volatile__(
6460+ " .set mips3 \n"
6461+ "1:" __LL "%1, %2 # local_add_return \n"
6462+ " addu %0, %1, %3 \n"
6463+ __SC "%0, %2 \n"
6464+ " beqzl %0, 1b \n"
6465+ " addu %0, %1, %3 \n"
6466+ " .set mips0 \n"
6467+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6468+ : "Ir" (i), "m" (l->a.counter)
6469+ : "memory");
6470+ } else if (kernel_uses_llsc) {
6471+ unsigned long temp;
6472+
6473+ __asm__ __volatile__(
6474+ " .set mips3 \n"
6475+ "1:" __LL "%1, %2 # local_add_return \n"
6476+ " addu %0, %1, %3 \n"
6477+ __SC "%0, %2 \n"
6478+ " beqz %0, 1b \n"
6479+ " addu %0, %1, %3 \n"
6480+ " .set mips0 \n"
6481+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6482+ : "Ir" (i), "m" (l->a.counter)
6483+ : "memory");
6484+ } else {
6485+ unsigned long flags;
6486+
6487+ local_irq_save(flags);
6488+ result = l->a.counter;
6489+ result += i;
6490+ l->a.counter = result;
6491+ local_irq_restore(flags);
6492+ }
6493+
6494+ return result;
6495+}
6496+
6497 static __inline__ long local_sub_return(long i, local_t * l)
6498 {
6499 unsigned long result;
6500@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6501
6502 #define local_cmpxchg(l, o, n) \
6503 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6504+#define local_cmpxchg_unchecked(l, o, n) \
6505+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6506 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6507
6508 /**
6509diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6510index 154b70a..426ae3d 100644
6511--- a/arch/mips/include/asm/page.h
6512+++ b/arch/mips/include/asm/page.h
6513@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6514 #ifdef CONFIG_CPU_MIPS32
6515 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6516 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6517- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6518+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6519 #else
6520 typedef struct { unsigned long long pte; } pte_t;
6521 #define pte_val(x) ((x).pte)
6522diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6523index b336037..5b874cc 100644
6524--- a/arch/mips/include/asm/pgalloc.h
6525+++ b/arch/mips/include/asm/pgalloc.h
6526@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6527 {
6528 set_pud(pud, __pud((unsigned long)pmd));
6529 }
6530+
6531+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6532+{
6533+ pud_populate(mm, pud, pmd);
6534+}
6535 #endif
6536
6537 /*
6538diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6539index 845016d..3303268 100644
6540--- a/arch/mips/include/asm/pgtable.h
6541+++ b/arch/mips/include/asm/pgtable.h
6542@@ -20,6 +20,9 @@
6543 #include <asm/io.h>
6544 #include <asm/pgtable-bits.h>
6545
6546+#define ktla_ktva(addr) (addr)
6547+#define ktva_ktla(addr) (addr)
6548+
6549 struct mm_struct;
6550 struct vm_area_struct;
6551
6552diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6553index e4440f9..8fb0005 100644
6554--- a/arch/mips/include/asm/thread_info.h
6555+++ b/arch/mips/include/asm/thread_info.h
6556@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6557 #define TIF_SECCOMP 4 /* secure computing */
6558 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6559 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6560+/* li takes a 32bit immediate */
6561+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6562+
6563 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6564 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6565 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6566@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6567 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6568 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6569 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6570+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6571
6572 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6573 _TIF_SYSCALL_AUDIT | \
6574- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6575+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6576+ _TIF_GRSEC_SETXID)
6577
6578 /* work to do in syscall_trace_leave() */
6579 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6580- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6581+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6582
6583 /* work to do on interrupt/exception return */
6584 #define _TIF_WORK_MASK \
6585@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6586 /* work to do on any return to u-space */
6587 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6588 _TIF_WORK_SYSCALL_EXIT | \
6589- _TIF_SYSCALL_TRACEPOINT)
6590+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6591
6592 /*
6593 * We stash processor id into a COP0 register to retrieve it fast
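
The thread_info.h hunk reserves bit 10 for TIF_GRSEC_SETXID and ORs its mask into the syscall entry/exit and allwork masks, so a pending credential update forces the slow syscall path where gr_delayed_cred_worker() can run (see the ptrace.c hunk below). A small model of how such a flag gates the slow path, using the bit numbers above:

#include <stdio.h>

#define TIF_SYSCALL_TRACE   0
#define TIF_GRSEC_SETXID    10  /* see the "li takes a 32bit immediate" note above */

#define _TIF_SYSCALL_TRACE  (1 << TIF_SYSCALL_TRACE)
#define _TIF_GRSEC_SETXID   (1 << TIF_GRSEC_SETXID)

#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)

int main(void)
{
    /* set on a thread when a sibling changes credentials */
    unsigned long flags = _TIF_GRSEC_SETXID;

    if (flags & _TIF_WORK_SYSCALL_ENTRY)
        puts("slow path: syscall_trace_enter() runs the cred worker");
    return 0;
}
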
6594diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6595index bf8b324..cec5705 100644
6596--- a/arch/mips/include/asm/uaccess.h
6597+++ b/arch/mips/include/asm/uaccess.h
6598@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6599 __ok == 0; \
6600 })
6601
6602+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6603 #define access_ok(type, addr, size) \
6604 likely(__access_ok((addr), (size), __access_mask))
6605
6606diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6607index 1188e00..41cf144 100644
6608--- a/arch/mips/kernel/binfmt_elfn32.c
6609+++ b/arch/mips/kernel/binfmt_elfn32.c
6610@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6611 #undef ELF_ET_DYN_BASE
6612 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6613
6614+#ifdef CONFIG_PAX_ASLR
6615+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6616+
6617+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6618+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6619+#endif
6620+
6621 #include <asm/processor.h>
6622 #include <linux/module.h>
6623 #include <linux/elfcore.h>
6624diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6625index 9287678..f870e47 100644
6626--- a/arch/mips/kernel/binfmt_elfo32.c
6627+++ b/arch/mips/kernel/binfmt_elfo32.c
6628@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6629 #undef ELF_ET_DYN_BASE
6630 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6631
6632+#ifdef CONFIG_PAX_ASLR
6633+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6634+
6635+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6636+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6637+#endif
6638+
6639 #include <asm/processor.h>
6640
6641 #include <linux/module.h>
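
The two binfmt_elf hunks above give PaX its per-ABI ASLR parameters: a fixed ET_DYN load base plus entropy widths in bits of page-granular randomness (27-PAGE_SHIFT or 36-PAGE_SHIFT depending on address-space size). A sketch of how such a width typically becomes a byte offset; the code that actually consumes these constants lives elsewhere in this patch, and get_random() here is a stand-in:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAX_DELTA_MMAP_LEN (27 - PAGE_SHIFT)   /* 15 bits of entropy */

static unsigned long get_random(void) { return (unsigned long)rand(); }

int main(void)
{
    unsigned long delta_mmap =
        (get_random() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

    /* 2^15 pages of 4 KiB: the mmap base moves by up to 128 MiB */
    printf("delta_mmap = %#lx\n", delta_mmap);
    return 0;
}
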
6642diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6643index a74ec3a..4f06f18 100644
6644--- a/arch/mips/kernel/i8259.c
6645+++ b/arch/mips/kernel/i8259.c
6646@@ -202,7 +202,7 @@ spurious_8259A_irq:
6647 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6648 spurious_irq_mask |= irqmask;
6649 }
6650- atomic_inc(&irq_err_count);
6651+ atomic_inc_unchecked(&irq_err_count);
6652 /*
6653 * Theoretically we do not have to handle this IRQ,
6654 * but in Linux this does not cause problems and is
6655diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6656index 44a1f79..2bd6aa3 100644
6657--- a/arch/mips/kernel/irq-gt641xx.c
6658+++ b/arch/mips/kernel/irq-gt641xx.c
6659@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6660 }
6661 }
6662
6663- atomic_inc(&irq_err_count);
6664+ atomic_inc_unchecked(&irq_err_count);
6665 }
6666
6667 void __init gt641xx_irq_init(void)
6668diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6669index d2bfbc2..a8eacd2 100644
6670--- a/arch/mips/kernel/irq.c
6671+++ b/arch/mips/kernel/irq.c
6672@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6673 printk("unexpected IRQ # %d\n", irq);
6674 }
6675
6676-atomic_t irq_err_count;
6677+atomic_unchecked_t irq_err_count;
6678
6679 int arch_show_interrupts(struct seq_file *p, int prec)
6680 {
6681- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6682+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6683 return 0;
6684 }
6685
6686 asmlinkage void spurious_interrupt(void)
6687 {
6688- atomic_inc(&irq_err_count);
6689+ atomic_inc_unchecked(&irq_err_count);
6690 }
6691
6692 void __init init_IRQ(void)
6693@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6694 #endif
6695 }
6696
6697+
6698 #ifdef DEBUG_STACKOVERFLOW
6699+extern void gr_handle_kernel_exploit(void);
6700+
6701 static inline void check_stack_overflow(void)
6702 {
6703 unsigned long sp;
6704@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6705 printk("do_IRQ: stack overflow: %ld\n",
6706 sp - sizeof(struct thread_info));
6707 dump_stack();
6708+ gr_handle_kernel_exploit();
6709 }
6710 }
6711 #else
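
The irq.c changes show the conversion pattern this patch applies throughout: under PAX_REFCOUNT, atomic_t arithmetic traps on signed overflow, so counters that are purely statistical (irq_err_count here) and may wrap harmlessly become atomic_unchecked_t, whose operations stay plain. A single-threaded userspace model of the two behaviours, with __builtin_add_overflow standing in for the trapping instruction sequence:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)            /* checked: refuse to wrap */
{
    int next;
    if (__builtin_add_overflow(v->counter, 1, &next)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();               /* the kernel traps, reports and recovers */
    }
    v->counter = next;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    /* wraps modulo 2^32, as the unchecked asm does */
    v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
    atomic_unchecked_t err_count = { 0x7fffffff };
    atomic_inc_unchecked(&err_count);   /* harmless wrap for a statistic */
    printf("err_count = %d\n", err_count.counter);

    atomic_t refs = { 0 };
    atomic_inc(&refs);                  /* normal case, no overflow */
    return 0;
}
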
6712diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6713index 0614717..002fa43 100644
6714--- a/arch/mips/kernel/pm-cps.c
6715+++ b/arch/mips/kernel/pm-cps.c
6716@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6717 nc_core_ready_count = nc_addr;
6718
6719 /* Ensure ready_count is zero-initialised before the assembly runs */
6720- ACCESS_ONCE(*nc_core_ready_count) = 0;
6721+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6722 coupled_barrier(&per_cpu(pm_barrier, core), online);
6723
6724 /* Run the generated entry code */
6725diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6726index 85bff5d..39bc202 100644
6727--- a/arch/mips/kernel/process.c
6728+++ b/arch/mips/kernel/process.c
6729@@ -534,18 +534,6 @@ out:
6730 return pc;
6731 }
6732
6733-/*
6734- * Don't forget that the stack pointer must be aligned on a 8 bytes
6735- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6736- */
6737-unsigned long arch_align_stack(unsigned long sp)
6738-{
6739- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6740- sp -= get_random_int() & ~PAGE_MASK;
6741-
6742- return sp & ALMASK;
6743-}
6744-
6745 static void arch_dump_stack(void *info)
6746 {
6747 struct pt_regs *regs;
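
Removing arch_align_stack() here drops the generic kernel's small per-exec stack jitter: under this patch the stack's randomization comes from PaX's delta_stack instead, and the helper degenerates to fixed alignment (the powerpc exec.h hunk below spells it out as ((x) & ~0xfUL)). A sketch of that replacement's behaviour:

#include <assert.h>

/* PaX-style replacement: align only, no per-exec jitter */
#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp = 0x7fffdeadbeefUL;

    assert((arch_align_stack(sp) & 0xfUL) == 0);  /* 16-byte aligned */
    assert(arch_align_stack(sp) <= sp);           /* never grows the stack */
    return 0;
}
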
6748diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6749index 5104528..950bbdc 100644
6750--- a/arch/mips/kernel/ptrace.c
6751+++ b/arch/mips/kernel/ptrace.c
6752@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6753 return ret;
6754 }
6755
6756+#ifdef CONFIG_GRKERNSEC_SETXID
6757+extern void gr_delayed_cred_worker(void);
6758+#endif
6759+
6760 /*
6761 * Notification of system call entry/exit
6762 * - triggered by current->work.syscall_trace
6763@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6764 tracehook_report_syscall_entry(regs))
6765 ret = -1;
6766
6767+#ifdef CONFIG_GRKERNSEC_SETXID
6768+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6769+ gr_delayed_cred_worker();
6770+#endif
6771+
6772 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6773 trace_sys_enter(regs, regs->regs[2]);
6774
6775diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6776index 07fc524..b9d7f28 100644
6777--- a/arch/mips/kernel/reset.c
6778+++ b/arch/mips/kernel/reset.c
6779@@ -13,6 +13,7 @@
6780 #include <linux/reboot.h>
6781
6782 #include <asm/reboot.h>
6783+#include <asm/bug.h>
6784
6785 /*
6786 * Urgs ... Too many MIPS machines to handle this in a generic way.
6787@@ -29,16 +30,19 @@ void machine_restart(char *command)
6788 {
6789 if (_machine_restart)
6790 _machine_restart(command);
6791+ BUG();
6792 }
6793
6794 void machine_halt(void)
6795 {
6796 if (_machine_halt)
6797 _machine_halt();
6798+ BUG();
6799 }
6800
6801 void machine_power_off(void)
6802 {
6803 if (pm_power_off)
6804 pm_power_off();
6805+ BUG();
6806 }
6807diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6808index 2242bdd..b284048 100644
6809--- a/arch/mips/kernel/sync-r4k.c
6810+++ b/arch/mips/kernel/sync-r4k.c
6811@@ -18,8 +18,8 @@
6812 #include <asm/mipsregs.h>
6813
6814 static atomic_t count_start_flag = ATOMIC_INIT(0);
6815-static atomic_t count_count_start = ATOMIC_INIT(0);
6816-static atomic_t count_count_stop = ATOMIC_INIT(0);
6817+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6818+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6819 static atomic_t count_reference = ATOMIC_INIT(0);
6820
6821 #define COUNTON 100
6822@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6823
6824 for (i = 0; i < NR_LOOPS; i++) {
6825 /* slaves loop on '!= 2' */
6826- while (atomic_read(&count_count_start) != 1)
6827+ while (atomic_read_unchecked(&count_count_start) != 1)
6828 mb();
6829- atomic_set(&count_count_stop, 0);
6830+ atomic_set_unchecked(&count_count_stop, 0);
6831 smp_wmb();
6832
6833 /* this lets the slaves write their count register */
6834- atomic_inc(&count_count_start);
6835+ atomic_inc_unchecked(&count_count_start);
6836
6837 /*
6838 * Everyone initialises count in the last loop:
6839@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6840 /*
6841 * Wait for all slaves to leave the synchronization point:
6842 */
6843- while (atomic_read(&count_count_stop) != 1)
6844+ while (atomic_read_unchecked(&count_count_stop) != 1)
6845 mb();
6846- atomic_set(&count_count_start, 0);
6847+ atomic_set_unchecked(&count_count_start, 0);
6848 smp_wmb();
6849- atomic_inc(&count_count_stop);
6850+ atomic_inc_unchecked(&count_count_stop);
6851 }
6852 /* Arrange for an interrupt in a short while */
6853 write_c0_compare(read_c0_count() + COUNTON);
6854@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6855 initcount = atomic_read(&count_reference);
6856
6857 for (i = 0; i < NR_LOOPS; i++) {
6858- atomic_inc(&count_count_start);
6859- while (atomic_read(&count_count_start) != 2)
6860+ atomic_inc_unchecked(&count_count_start);
6861+ while (atomic_read_unchecked(&count_count_start) != 2)
6862 mb();
6863
6864 /*
6865@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6866 if (i == NR_LOOPS-1)
6867 write_c0_count(initcount);
6868
6869- atomic_inc(&count_count_stop);
6870- while (atomic_read(&count_count_stop) != 2)
6871+ atomic_inc_unchecked(&count_count_stop);
6872+ while (atomic_read_unchecked(&count_count_stop) != 2)
6873 mb();
6874 }
6875 /* Arrange for an interrupt in a short while */
6876diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6877index c3b41e2..46c32e9 100644
6878--- a/arch/mips/kernel/traps.c
6879+++ b/arch/mips/kernel/traps.c
6880@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6881 siginfo_t info;
6882
6883 prev_state = exception_enter();
6884- die_if_kernel("Integer overflow", regs);
6885+ if (unlikely(!user_mode(regs))) {
6886+
6887+#ifdef CONFIG_PAX_REFCOUNT
6888+ if (fixup_exception(regs)) {
6889+ pax_report_refcount_overflow(regs);
6890+ exception_exit(prev_state);
6891+ return;
6892+ }
6893+#endif
6894+
6895+ die("Integer overflow", regs);
6896+ }
6897
6898 info.si_code = FPE_INTOVF;
6899 info.si_signo = SIGFPE;
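
The do_ov() rewrite is the MIPS back end of PAX_REFCOUNT: a kernel-mode integer-overflow exception whose PC has an exception-table fixup is treated as a tripped reference counter, reported, and resumed at the fixup rather than killing the machine; anything else still dies. A compact model of that decide-and-recover flow (the helpers below are stand-ins for the real ones):

#include <stdbool.h>
#include <stdio.h>

struct pt_regs { unsigned long cp0_epc; };

/* stand-in: is there an __ex_table fixup for the faulting PC? */
static bool fixup_exception(struct pt_regs *regs)
{
    regs->cp0_epc += 4;          /* resume past the trapping instruction */
    return true;
}

static void pax_report_refcount_overflow(struct pt_regs *regs)
{
    fprintf(stderr, "PAX: refcount overflow at %#lx\n", regs->cp0_epc);
}

static void do_ov(struct pt_regs *regs, bool user)
{
    if (!user) {
        if (fixup_exception(regs)) {     /* instrumented atomic op */
            pax_report_refcount_overflow(regs);
            return;                      /* recover instead of die() */
        }
        /* no fixup: genuine kernel bug -> die("Integer overflow") */
        return;
    }
    /* user mode: deliver SIGFPE/FPE_INTOVF as before */
}

int main(void)
{
    struct pt_regs regs = { 0x80001000UL };
    do_ov(&regs, false);
    return 0;
}
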
6900diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6901index 270bbd4..c01932a 100644
6902--- a/arch/mips/kvm/mips.c
6903+++ b/arch/mips/kvm/mips.c
6904@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6905 return r;
6906 }
6907
6908-int kvm_arch_init(void *opaque)
6909+int kvm_arch_init(const void *opaque)
6910 {
6911 if (kvm_mips_callbacks) {
6912 kvm_err("kvm: module already exists\n");
6913diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6914index 70ab5d6..62940fe 100644
6915--- a/arch/mips/mm/fault.c
6916+++ b/arch/mips/mm/fault.c
6917@@ -28,6 +28,23 @@
6918 #include <asm/highmem.h> /* For VMALLOC_END */
6919 #include <linux/kdebug.h>
6920
6921+#ifdef CONFIG_PAX_PAGEEXEC
6922+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6923+{
6924+ unsigned long i;
6925+
6926+ printk(KERN_ERR "PAX: bytes at PC: ");
6927+ for (i = 0; i < 5; i++) {
6928+ unsigned int c;
6929+ if (get_user(c, (unsigned int *)pc+i))
6930+ printk(KERN_CONT "???????? ");
6931+ else
6932+ printk(KERN_CONT "%08x ", c);
6933+ }
6934+ printk("\n");
6935+}
6936+#endif
6937+
6938 /*
6939 * This routine handles page faults. It determines the address,
6940 * and the problem, and then passes it off to one of the appropriate
6941@@ -201,6 +218,14 @@ bad_area:
6942 bad_area_nosemaphore:
6943 /* User mode accesses just cause a SIGSEGV */
6944 if (user_mode(regs)) {
6945+
6946+#ifdef CONFIG_PAX_PAGEEXEC
6947+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6948+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6949+ do_group_exit(SIGKILL);
6950+ }
6951+#endif
6952+
6953 tsk->thread.cp0_badvaddr = address;
6954 tsk->thread.error_code = write;
6955 #if 0
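
pax_report_insns() added above dumps the five instruction words at a faulting user PC, printing "????????" for any word get_user() cannot fetch. The same loop in runnable form, with the fetch modelled by a helper that can fail:

#include <stdint.h>
#include <stdio.h>

/* stand-in for get_user(): nonzero means the address faulted */
static int fetch_word(uint32_t *out, const uint32_t *pc, unsigned long i)
{
    if (!pc)
        return -1;               /* model an unmapped PC */
    *out = pc[i];
    return 0;
}

static void report_insns(const uint32_t *pc)
{
    fprintf(stderr, "PAX: bytes at PC: ");
    for (unsigned long i = 0; i < 5; i++) {
        uint32_t c;
        if (fetch_word(&c, pc, i))
            fprintf(stderr, "???????? ");
        else
            fprintf(stderr, "%08x ", c);
    }
    fprintf(stderr, "\n");
}

int main(void)
{
    uint32_t text[5] = { 0x03e00008, 0x00000000, 0x27bdffe0,
                         0xafbf001c, 0x0c000000 };
    report_insns(text);          /* readable PC: five words */
    report_insns(NULL);          /* unreadable PC: all ???????? */
    return 0;
}
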
6956diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6957index f1baadd..5472dca 100644
6958--- a/arch/mips/mm/mmap.c
6959+++ b/arch/mips/mm/mmap.c
6960@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 struct vm_area_struct *vma;
6962 unsigned long addr = addr0;
6963 int do_color_align;
6964+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6965 struct vm_unmapped_area_info info;
6966
6967 if (unlikely(len > TASK_SIZE))
6968@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6969 do_color_align = 1;
6970
6971 /* requesting a specific address */
6972+
6973+#ifdef CONFIG_PAX_RANDMMAP
6974+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6975+#endif
6976+
6977 if (addr) {
6978 if (do_color_align)
6979 addr = COLOUR_ALIGN(addr, pgoff);
6980@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6981 addr = PAGE_ALIGN(addr);
6982
6983 vma = find_vma(mm, addr);
6984- if (TASK_SIZE - len >= addr &&
6985- (!vma || addr + len <= vma->vm_start))
6986+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6987 return addr;
6988 }
6989
6990 info.length = len;
6991 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6992 info.align_offset = pgoff << PAGE_SHIFT;
6993+ info.threadstack_offset = offset;
6994
6995 if (dir == DOWN) {
6996 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6997@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6998 {
6999 unsigned long random_factor = 0UL;
7000
7001+#ifdef CONFIG_PAX_RANDMMAP
7002+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7003+#endif
7004+
7005 if (current->flags & PF_RANDOMIZE) {
7006 random_factor = get_random_int();
7007 random_factor = random_factor << PAGE_SHIFT;
7008@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7009
7010 if (mmap_is_legacy()) {
7011 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7012+
7013+#ifdef CONFIG_PAX_RANDMMAP
7014+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7015+ mm->mmap_base += mm->delta_mmap;
7016+#endif
7017+
7018 mm->get_unmapped_area = arch_get_unmapped_area;
7019 } else {
7020 mm->mmap_base = mmap_base(random_factor);
7021+
7022+#ifdef CONFIG_PAX_RANDMMAP
7023+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7024+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7025+#endif
7026+
7027 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7028 }
7029 }
7030
7031-static inline unsigned long brk_rnd(void)
7032-{
7033- unsigned long rnd = get_random_int();
7034-
7035- rnd = rnd << PAGE_SHIFT;
7036- /* 8MB for 32bit, 256MB for 64bit */
7037- if (TASK_IS_32BIT_ADDR)
7038- rnd = rnd & 0x7ffffful;
7039- else
7040- rnd = rnd & 0xffffffful;
7041-
7042- return rnd;
7043-}
7044-
7045-unsigned long arch_randomize_brk(struct mm_struct *mm)
7046-{
7047- unsigned long base = mm->brk;
7048- unsigned long ret;
7049-
7050- ret = PAGE_ALIGN(base + brk_rnd());
7051-
7052- if (ret < mm->brk)
7053- return mm->brk;
7054-
7055- return ret;
7056-}
7057-
7058 int __virt_addr_valid(const volatile void *kaddr)
7059 {
7060 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
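
The mmap.c hunks thread a per-request random offset (gr_rand_threadstack_offset) into the unmapped-area search and swap the bare "fits below the next VMA" test for check_heap_stack_gap(), which also keeps a gap in front of stack VMAs. Both helpers are defined elsewhere in this patch and their exact signatures differ; a simplified model of the gap test is:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long start; bool is_stack; };

/* candidate [addr, addr+len) is acceptable if it ends below the next
 * VMA, keeping an extra 'offset' of slack when that VMA is a stack */
static bool check_heap_stack_gap(const struct vma *next, unsigned long addr,
                                 unsigned long len, unsigned long offset)
{
    if (!next)
        return true;
    if (next->is_stack)
        return addr + len + offset <= next->start;
    return addr + len <= next->start;
}

int main(void)
{
    struct vma stack = { .start = 0x7f0000000000UL, .is_stack = true };
    unsigned long offset = 0x10000;   /* randomized per mm/file/flags */

    printf("fits: %d\n",
           check_heap_stack_gap(&stack, 0x7efffff00000UL, 0x8000, offset));
    return 0;
}
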
7061diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7062index d07e041..bedb72b 100644
7063--- a/arch/mips/pci/pci-octeon.c
7064+++ b/arch/mips/pci/pci-octeon.c
7065@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7066
7067
7068 static struct pci_ops octeon_pci_ops = {
7069- octeon_read_config,
7070- octeon_write_config,
7071+ .read = octeon_read_config,
7072+ .write = octeon_write_config,
7073 };
7074
7075 static struct resource octeon_pci_mem_resource = {
7076diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7077index 5e36c33..eb4a17b 100644
7078--- a/arch/mips/pci/pcie-octeon.c
7079+++ b/arch/mips/pci/pcie-octeon.c
7080@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7081 }
7082
7083 static struct pci_ops octeon_pcie0_ops = {
7084- octeon_pcie0_read_config,
7085- octeon_pcie0_write_config,
7086+ .read = octeon_pcie0_read_config,
7087+ .write = octeon_pcie0_write_config,
7088 };
7089
7090 static struct resource octeon_pcie0_mem_resource = {
7091@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7092 };
7093
7094 static struct pci_ops octeon_pcie1_ops = {
7095- octeon_pcie1_read_config,
7096- octeon_pcie1_write_config,
7097+ .read = octeon_pcie1_read_config,
7098+ .write = octeon_pcie1_write_config,
7099 };
7100
7101 static struct resource octeon_pcie1_mem_resource = {
7102@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7103 };
7104
7105 static struct pci_ops octeon_dummy_ops = {
7106- octeon_dummy_read_config,
7107- octeon_dummy_write_config,
7108+ .read = octeon_dummy_read_config,
7109+ .write = octeon_dummy_write_config,
7110 };
7111
7112 static struct resource octeon_dummy_mem_resource = {
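
The pci_ops hunks in both Octeon files change nothing at runtime today; they replace positional initializers, which silently misbind if struct pci_ops ever gains or reorders members, with designated ones that bind by name. The contrast in miniature:

#include <stdio.h>

struct pci_ops {
    int (*read)(int where, int *val);
    int (*write)(int where, int val);
};

static int demo_read(int where, int *val) { *val = where; return 0; }
static int demo_write(int where, int val) { (void)where; (void)val; return 0; }

/* fragile: meaning depends entirely on member order */
static struct pci_ops positional = { demo_read, demo_write };

/* robust: survives reordering and newly inserted members */
static struct pci_ops designated = { .read = demo_read, .write = demo_write };

int main(void)
{
    int v;
    positional.read(1, &v);
    designated.read(2, &v);
    printf("read back %d\n", v);
    return 0;
}
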
7113diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7114index a2358b4..7cead4f 100644
7115--- a/arch/mips/sgi-ip27/ip27-nmi.c
7116+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7117@@ -187,9 +187,9 @@ void
7118 cont_nmi_dump(void)
7119 {
7120 #ifndef REAL_NMI_SIGNAL
7121- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7122+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7123
7124- atomic_inc(&nmied_cpus);
7125+ atomic_inc_unchecked(&nmied_cpus);
7126 #endif
7127 /*
7128 * Only allow 1 cpu to proceed
7129@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7130 udelay(10000);
7131 }
7132 #else
7133- while (atomic_read(&nmied_cpus) != num_online_cpus());
7134+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7135 #endif
7136
7137 /*
7138diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7139index a046b30..6799527 100644
7140--- a/arch/mips/sni/rm200.c
7141+++ b/arch/mips/sni/rm200.c
7142@@ -270,7 +270,7 @@ spurious_8259A_irq:
7143 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7144 spurious_irq_mask |= irqmask;
7145 }
7146- atomic_inc(&irq_err_count);
7147+ atomic_inc_unchecked(&irq_err_count);
7148 /*
7149 * Theoretically we do not have to handle this IRQ,
7150 * but in Linux this does not cause problems and is
7151diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7152index 41e873b..34d33a7 100644
7153--- a/arch/mips/vr41xx/common/icu.c
7154+++ b/arch/mips/vr41xx/common/icu.c
7155@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7156
7157 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7158
7159- atomic_inc(&irq_err_count);
7160+ atomic_inc_unchecked(&irq_err_count);
7161
7162 return -1;
7163 }
7164diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7165index ae0e4ee..e8f0692 100644
7166--- a/arch/mips/vr41xx/common/irq.c
7167+++ b/arch/mips/vr41xx/common/irq.c
7168@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7169 irq_cascade_t *cascade;
7170
7171 if (irq >= NR_IRQS) {
7172- atomic_inc(&irq_err_count);
7173+ atomic_inc_unchecked(&irq_err_count);
7174 return;
7175 }
7176
7177@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7178 ret = cascade->get_irq(irq);
7179 irq = ret;
7180 if (ret < 0)
7181- atomic_inc(&irq_err_count);
7182+ atomic_inc_unchecked(&irq_err_count);
7183 else
7184 irq_dispatch(irq);
7185 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7186diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7187index 967d144..db12197 100644
7188--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7189+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7190@@ -11,12 +11,14 @@
7191 #ifndef _ASM_PROC_CACHE_H
7192 #define _ASM_PROC_CACHE_H
7193
7194+#include <linux/const.h>
7195+
7196 /* L1 cache */
7197
7198 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7199 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7200-#define L1_CACHE_BYTES 16 /* bytes per entry */
7201 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7202+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7203 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7204
7205 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7206diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7207index bcb5df2..84fabd2 100644
7208--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7209+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7210@@ -16,13 +16,15 @@
7211 #ifndef _ASM_PROC_CACHE_H
7212 #define _ASM_PROC_CACHE_H
7213
7214+#include <linux/const.h>
7215+
7216 /*
7217 * L1 cache
7218 */
7219 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7220 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7221-#define L1_CACHE_BYTES 32 /* bytes per entry */
7222 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7223+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7224 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7225
7226 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7227diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7228index 4ce7a01..449202a 100644
7229--- a/arch/openrisc/include/asm/cache.h
7230+++ b/arch/openrisc/include/asm/cache.h
7231@@ -19,11 +19,13 @@
7232 #ifndef __ASM_OPENRISC_CACHE_H
7233 #define __ASM_OPENRISC_CACHE_H
7234
7235+#include <linux/const.h>
7236+
7237 /* FIXME: How can we replace these with values from the CPU...
7238 * they shouldn't be hard-coded!
7239 */
7240
7241-#define L1_CACHE_BYTES 16
7242 #define L1_CACHE_SHIFT 4
7243+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7244
7245 #endif /* __ASM_OPENRISC_CACHE_H */
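
The cache.h hunks for mn10300 and openrisc above (and parisc/powerpc below) are all one change: stop keeping L1_CACHE_BYTES as an independent literal and derive it from L1_CACHE_SHIFT with _AC(1,UL), which linux/const.h defines so the expression works from both C and assembly. The invariant this enforces, as a compile-time check:

/* _AC(X,Y) is X##Y in C and bare X in assembly (per linux/const.h) */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

/* the old "#define L1_CACHE_BYTES 32" could silently drift away from
 * the shift; the derived form makes disagreement impossible */
_Static_assert(L1_CACHE_BYTES == 32, "bytes and shift agree");

int main(void) { return 0; }
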
7246diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7247index 226f8ca..9d9b87d 100644
7248--- a/arch/parisc/include/asm/atomic.h
7249+++ b/arch/parisc/include/asm/atomic.h
7250@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7251 return dec;
7252 }
7253
7254+#define atomic64_read_unchecked(v) atomic64_read(v)
7255+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7256+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7257+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7258+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7259+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7260+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7261+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7262+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7263+
7264 #endif /* !CONFIG_64BIT */
7265
7266
7267diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7268index 47f11c7..3420df2 100644
7269--- a/arch/parisc/include/asm/cache.h
7270+++ b/arch/parisc/include/asm/cache.h
7271@@ -5,6 +5,7 @@
7272 #ifndef __ARCH_PARISC_CACHE_H
7273 #define __ARCH_PARISC_CACHE_H
7274
7275+#include <linux/const.h>
7276
7277 /*
7278 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7279@@ -15,13 +16,13 @@
7280 * just ruin performance.
7281 */
7282 #ifdef CONFIG_PA20
7283-#define L1_CACHE_BYTES 64
7284 #define L1_CACHE_SHIFT 6
7285 #else
7286-#define L1_CACHE_BYTES 32
7287 #define L1_CACHE_SHIFT 5
7288 #endif
7289
7290+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7291+
7292 #ifndef __ASSEMBLY__
7293
7294 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7295diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7296index 3391d06..c23a2cc 100644
7297--- a/arch/parisc/include/asm/elf.h
7298+++ b/arch/parisc/include/asm/elf.h
7299@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7300
7301 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7302
7303+#ifdef CONFIG_PAX_ASLR
7304+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7305+
7306+#define PAX_DELTA_MMAP_LEN 16
7307+#define PAX_DELTA_STACK_LEN 16
7308+#endif
7309+
7310 /* This yields a mask that user programs can use to figure out what
7311 instruction set this CPU supports. This could be done in user space,
7312 but it's not easy, and we've already done it here. */
7313diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7314index f213f5b..0af3e8e 100644
7315--- a/arch/parisc/include/asm/pgalloc.h
7316+++ b/arch/parisc/include/asm/pgalloc.h
7317@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7318 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7319 }
7320
7321+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7322+{
7323+ pgd_populate(mm, pgd, pmd);
7324+}
7325+
7326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7327 {
7328 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7329@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7330 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7331 #define pmd_free(mm, x) do { } while (0)
7332 #define pgd_populate(mm, pmd, pte) BUG()
7333+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7334
7335 #endif
7336
7337diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7338index 22b89d1..ce34230 100644
7339--- a/arch/parisc/include/asm/pgtable.h
7340+++ b/arch/parisc/include/asm/pgtable.h
7341@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7342 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7343 #define PAGE_COPY PAGE_EXECREAD
7344 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7345+
7346+#ifdef CONFIG_PAX_PAGEEXEC
7347+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7348+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7349+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7350+#else
7351+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7352+# define PAGE_COPY_NOEXEC PAGE_COPY
7353+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7354+#endif
7355+
7356 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7357 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7358 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7359diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7360index a5cb070..8604ddc 100644
7361--- a/arch/parisc/include/asm/uaccess.h
7362+++ b/arch/parisc/include/asm/uaccess.h
7363@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7364 const void __user *from,
7365 unsigned long n)
7366 {
7367- int sz = __compiletime_object_size(to);
7368+ size_t sz = __compiletime_object_size(to);
7369 int ret = -EFAULT;
7370
7371- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7372+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7373 ret = __copy_from_user(to, from, n);
7374 else
7375 copy_from_user_overflow();
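
The copy_from_user() change is a width fix: __compiletime_object_size() reports "unknown" as (size_t)-1, and storing the result in an int truncates real sizes above INT_MAX on 64-bit, making the bounds test misfire. Keeping everything size_t, with the sentinel written out as (size_t)-1, avoids that. What the truncation would do:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t object = 0x100000000UL;    /* a 4 GiB object on LP64 */
    size_t n = 16;

    int    sz_int  = (int)object;     /* truncates to 0 on LP64 */
    size_t sz_size = object;          /* preserved */

    /* old-style check: wrongly reports an overflowing copy */
    printf("int check:    %d\n", sz_int == -1 || (size_t)sz_int >= n);
    /* patched check: correct */
    printf("size_t check: %d\n", sz_size == (size_t)-1 || sz_size >= n);
    return 0;
}
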
7376diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7377index 5822e8e..bc5e638 100644
7378--- a/arch/parisc/kernel/module.c
7379+++ b/arch/parisc/kernel/module.c
7380@@ -98,16 +98,38 @@
7381
7382 /* three functions to determine where in the module core
7383 * or init pieces the location is */
7384+static inline int in_init_rx(struct module *me, void *loc)
7385+{
7386+ return (loc >= me->module_init_rx &&
7387+ loc < (me->module_init_rx + me->init_size_rx));
7388+}
7389+
7390+static inline int in_init_rw(struct module *me, void *loc)
7391+{
7392+ return (loc >= me->module_init_rw &&
7393+ loc < (me->module_init_rw + me->init_size_rw));
7394+}
7395+
7396 static inline int in_init(struct module *me, void *loc)
7397 {
7398- return (loc >= me->module_init &&
7399- loc <= (me->module_init + me->init_size));
7400+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7401+}
7402+
7403+static inline int in_core_rx(struct module *me, void *loc)
7404+{
7405+ return (loc >= me->module_core_rx &&
7406+ loc < (me->module_core_rx + me->core_size_rx));
7407+}
7408+
7409+static inline int in_core_rw(struct module *me, void *loc)
7410+{
7411+ return (loc >= me->module_core_rw &&
7412+ loc < (me->module_core_rw + me->core_size_rw));
7413 }
7414
7415 static inline int in_core(struct module *me, void *loc)
7416 {
7417- return (loc >= me->module_core &&
7418- loc <= (me->module_core + me->core_size));
7419+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7420 }
7421
7422 static inline int in_local(struct module *me, void *loc)
7423@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7424 }
7425
7426 /* align things a bit */
7427- me->core_size = ALIGN(me->core_size, 16);
7428- me->arch.got_offset = me->core_size;
7429- me->core_size += gots * sizeof(struct got_entry);
7430+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7431+ me->arch.got_offset = me->core_size_rw;
7432+ me->core_size_rw += gots * sizeof(struct got_entry);
7433
7434- me->core_size = ALIGN(me->core_size, 16);
7435- me->arch.fdesc_offset = me->core_size;
7436- me->core_size += fdescs * sizeof(Elf_Fdesc);
7437+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7438+ me->arch.fdesc_offset = me->core_size_rw;
7439+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7440
7441 me->arch.got_max = gots;
7442 me->arch.fdesc_max = fdescs;
7443@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7444
7445 BUG_ON(value == 0);
7446
7447- got = me->module_core + me->arch.got_offset;
7448+ got = me->module_core_rw + me->arch.got_offset;
7449 for (i = 0; got[i].addr; i++)
7450 if (got[i].addr == value)
7451 goto out;
7452@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7453 #ifdef CONFIG_64BIT
7454 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7455 {
7456- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7457+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7458
7459 if (!value) {
7460 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7461@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7462
7463 /* Create new one */
7464 fdesc->addr = value;
7465- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7466+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7467 return (Elf_Addr)fdesc;
7468 }
7469 #endif /* CONFIG_64BIT */
7470@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7471
7472 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7473 end = table + sechdrs[me->arch.unwind_section].sh_size;
7474- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7475+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7476
7477 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7478 me->arch.unwind_section, table, end, gp);
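
The module.c changes follow PaX's split module layout: the single module_core/core_size pair becomes separate RX (code) and RW (data, GOT, fdescs) regions, and the containment tests become half-open ranges; note the old code used "<= base + size", which accepted the first byte past the end, while the rewritten helpers use "<". A model of the decomposed check:

#include <stdbool.h>
#include <stdio.h>

struct module {
    char *module_core_rx; unsigned long core_size_rx;
    char *module_core_rw; unsigned long core_size_rw;
};

static bool in_range(void *loc, char *base, unsigned long size)
{
    /* half-open [base, base+size): no one-past-the-end acceptance */
    return (char *)loc >= base && (char *)loc < base + size;
}

static bool in_core(struct module *me, void *loc)
{
    return in_range(loc, me->module_core_rx, me->core_size_rx) ||
           in_range(loc, me->module_core_rw, me->core_size_rw);
}

int main(void)
{
    static char rx[64], rw[64];
    struct module m = { rx, sizeof rx, rw, sizeof rw };

    printf("%d %d %d\n",
           in_core(&m, rx + 10),   /* 1: inside the code region */
           in_core(&m, rw + 63),   /* 1: last data byte */
           in_core(&m, rw + 64));  /* 0: one past the end */
    return 0;
}
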
7479diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7480index e1ffea2..46ed66e 100644
7481--- a/arch/parisc/kernel/sys_parisc.c
7482+++ b/arch/parisc/kernel/sys_parisc.c
7483@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7484 unsigned long task_size = TASK_SIZE;
7485 int do_color_align, last_mmap;
7486 struct vm_unmapped_area_info info;
7487+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7488
7489 if (len > task_size)
7490 return -ENOMEM;
7491@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7492 goto found_addr;
7493 }
7494
7495+#ifdef CONFIG_PAX_RANDMMAP
7496+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7497+#endif
7498+
7499 if (addr) {
7500 if (do_color_align && last_mmap)
7501 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7502@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7503 info.high_limit = mmap_upper_limit();
7504 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7505 info.align_offset = shared_align_offset(last_mmap, pgoff);
7506+ info.threadstack_offset = offset;
7507 addr = vm_unmapped_area(&info);
7508
7509 found_addr:
7510@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7511 unsigned long addr = addr0;
7512 int do_color_align, last_mmap;
7513 struct vm_unmapped_area_info info;
7514+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7515
7516 #ifdef CONFIG_64BIT
7517 /* This should only ever run for 32-bit processes. */
7518@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7519 }
7520
7521 /* requesting a specific address */
7522+#ifdef CONFIG_PAX_RANDMMAP
7523+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7524+#endif
7525+
7526 if (addr) {
7527 if (do_color_align && last_mmap)
7528 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7529@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7530 info.high_limit = mm->mmap_base;
7531 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7532 info.align_offset = shared_align_offset(last_mmap, pgoff);
7533+ info.threadstack_offset = offset;
7534 addr = vm_unmapped_area(&info);
7535 if (!(addr & ~PAGE_MASK))
7536 goto found_addr;
7537@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7538 mm->mmap_legacy_base = mmap_legacy_base();
7539 mm->mmap_base = mmap_upper_limit();
7540
7541+#ifdef CONFIG_PAX_RANDMMAP
7542+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7543+ mm->mmap_legacy_base += mm->delta_mmap;
7544+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7545+ }
7546+#endif
7547+
7548 if (mmap_is_legacy()) {
7549 mm->mmap_base = mm->mmap_legacy_base;
7550 mm->get_unmapped_area = arch_get_unmapped_area;
7551diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7552index 47ee620..1107387 100644
7553--- a/arch/parisc/kernel/traps.c
7554+++ b/arch/parisc/kernel/traps.c
7555@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7556
7557 down_read(&current->mm->mmap_sem);
7558 vma = find_vma(current->mm,regs->iaoq[0]);
7559- if (vma && (regs->iaoq[0] >= vma->vm_start)
7560- && (vma->vm_flags & VM_EXEC)) {
7561-
7562+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7563 fault_address = regs->iaoq[0];
7564 fault_space = regs->iasq[0];
7565
7566diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7567index e5120e6..8ddb5cc 100644
7568--- a/arch/parisc/mm/fault.c
7569+++ b/arch/parisc/mm/fault.c
7570@@ -15,6 +15,7 @@
7571 #include <linux/sched.h>
7572 #include <linux/interrupt.h>
7573 #include <linux/module.h>
7574+#include <linux/unistd.h>
7575
7576 #include <asm/uaccess.h>
7577 #include <asm/traps.h>
7578@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7579 static unsigned long
7580 parisc_acctyp(unsigned long code, unsigned int inst)
7581 {
7582- if (code == 6 || code == 16)
7583+ if (code == 6 || code == 7 || code == 16)
7584 return VM_EXEC;
7585
7586 switch (inst & 0xf0000000) {
7587@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7588 }
7589 #endif
7590
7591+#ifdef CONFIG_PAX_PAGEEXEC
7592+/*
7593+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7594+ *
7595+ * returns 1 when task should be killed
7596+ * 2 when rt_sigreturn trampoline was detected
7597+ * 3 when unpatched PLT trampoline was detected
7598+ */
7599+static int pax_handle_fetch_fault(struct pt_regs *regs)
7600+{
7601+
7602+#ifdef CONFIG_PAX_EMUPLT
7603+ int err;
7604+
7605+ do { /* PaX: unpatched PLT emulation */
7606+ unsigned int bl, depwi;
7607+
7608+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7609+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7610+
7611+ if (err)
7612+ break;
7613+
7614+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7615+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7616+
7617+ err = get_user(ldw, (unsigned int *)addr);
7618+ err |= get_user(bv, (unsigned int *)(addr+4));
7619+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7620+
7621+ if (err)
7622+ break;
7623+
7624+ if (ldw == 0x0E801096U &&
7625+ bv == 0xEAC0C000U &&
7626+ ldw2 == 0x0E881095U)
7627+ {
7628+ unsigned int resolver, map;
7629+
7630+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7631+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7632+ if (err)
7633+ break;
7634+
7635+ regs->gr[20] = instruction_pointer(regs)+8;
7636+ regs->gr[21] = map;
7637+ regs->gr[22] = resolver;
7638+ regs->iaoq[0] = resolver | 3UL;
7639+ regs->iaoq[1] = regs->iaoq[0] + 4;
7640+ return 3;
7641+ }
7642+ }
7643+ } while (0);
7644+#endif
7645+
7646+#ifdef CONFIG_PAX_EMUTRAMP
7647+
7648+#ifndef CONFIG_PAX_EMUSIGRT
7649+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7650+ return 1;
7651+#endif
7652+
7653+ do { /* PaX: rt_sigreturn emulation */
7654+ unsigned int ldi1, ldi2, bel, nop;
7655+
7656+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7657+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7658+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7659+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7660+
7661+ if (err)
7662+ break;
7663+
7664+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7665+ ldi2 == 0x3414015AU &&
7666+ bel == 0xE4008200U &&
7667+ nop == 0x08000240U)
7668+ {
7669+ regs->gr[25] = (ldi1 & 2) >> 1;
7670+ regs->gr[20] = __NR_rt_sigreturn;
7671+ regs->gr[31] = regs->iaoq[1] + 16;
7672+ regs->sr[0] = regs->iasq[1];
7673+ regs->iaoq[0] = 0x100UL;
7674+ regs->iaoq[1] = regs->iaoq[0] + 4;
7675+ regs->iasq[0] = regs->sr[2];
7676+ regs->iasq[1] = regs->sr[2];
7677+ return 2;
7678+ }
7679+ } while (0);
7680+#endif
7681+
7682+ return 1;
7683+}
7684+
7685+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7686+{
7687+ unsigned long i;
7688+
7689+ printk(KERN_ERR "PAX: bytes at PC: ");
7690+ for (i = 0; i < 5; i++) {
7691+ unsigned int c;
7692+ if (get_user(c, (unsigned int *)pc+i))
7693+ printk(KERN_CONT "???????? ");
7694+ else
7695+ printk(KERN_CONT "%08x ", c);
7696+ }
7697+ printk("\n");
7698+}
7699+#endif
7700+
7701 int fixup_exception(struct pt_regs *regs)
7702 {
7703 const struct exception_table_entry *fix;
7704@@ -234,8 +345,33 @@ retry:
7705
7706 good_area:
7707
7708- if ((vma->vm_flags & acc_type) != acc_type)
7709+ if ((vma->vm_flags & acc_type) != acc_type) {
7710+
7711+#ifdef CONFIG_PAX_PAGEEXEC
7712+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7713+ (address & ~3UL) == instruction_pointer(regs))
7714+ {
7715+ up_read(&mm->mmap_sem);
7716+ switch (pax_handle_fetch_fault(regs)) {
7717+
7718+#ifdef CONFIG_PAX_EMUPLT
7719+ case 3:
7720+ return;
7721+#endif
7722+
7723+#ifdef CONFIG_PAX_EMUTRAMP
7724+ case 2:
7725+ return;
7726+#endif
7727+
7728+ }
7729+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7730+ do_group_exit(SIGKILL);
7731+ }
7732+#endif
7733+
7734 goto bad_area;
7735+ }
7736
7737 /*
7738 * If for any reason at all we couldn't handle the fault, make
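
pax_handle_fetch_fault() added above lets PAGEEXEC stay strict while still supporting two legitimate idioms that would otherwise fault: it fetches the words around the faulting PC and, only when every opcode matches the known unpatched-PLT or rt_sigreturn trampoline byte-for-byte, performs the sequence's effect on the register file and resumes; anything else is treated as an attack and killed. A stripped-down model of that fetch-and-compare shape (the opcodes and register effects below are placeholders, not the PA-RISC encodings):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct regs { unsigned long pc, arg; };

static bool emulate_trampoline(struct regs *r, const uint32_t *text)
{
    /* placeholder pattern; the real code matches bl/depwi then ldw/bv/ldw */
    static const uint32_t expect[2] = { 0xEA9F1FDDu, 0xD6801C1Eu };

    if (text[0] != expect[0] || text[1] != expect[1])
        return false;       /* unknown sequence: caller kills the task */

    r->arg = r->pc + 8;     /* hand the resolver its arguments... */
    r->pc  = 0x100;         /* ...and branch there instead of faulting */
    return true;
}

int main(void)
{
    uint32_t text[2] = { 0xEA9F1FDDu, 0xD6801C1Eu };
    struct regs r = { .pc = 0x40000, .arg = 0 };

    printf("emulated: %d, new pc %#lx\n",
           emulate_trampoline(&r, text), r.pc);
    return 0;
}
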
7739diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7740index a2a168e..e484682 100644
7741--- a/arch/powerpc/Kconfig
7742+++ b/arch/powerpc/Kconfig
7743@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7744 config KEXEC
7745 bool "kexec system call"
7746 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7747+ depends on !GRKERNSEC_KMEM
7748 help
7749 kexec is a system call that implements the ability to shutdown your
7750 current kernel, and to start another kernel. It is like a reboot
7751diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7752index 512d278..d31fadd 100644
7753--- a/arch/powerpc/include/asm/atomic.h
7754+++ b/arch/powerpc/include/asm/atomic.h
7755@@ -12,6 +12,11 @@
7756
7757 #define ATOMIC_INIT(i) { (i) }
7758
7759+#define _ASM_EXTABLE(from, to) \
7760+" .section __ex_table,\"a\"\n" \
7761+ PPC_LONG" " #from ", " #to"\n" \
7762+" .previous\n"
7763+
7764 static __inline__ int atomic_read(const atomic_t *v)
7765 {
7766 int t;
7767@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7768 return t;
7769 }
7770
7771+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7772+{
7773+ int t;
7774+
7775+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7776+
7777+ return t;
7778+}
7779+
7780 static __inline__ void atomic_set(atomic_t *v, int i)
7781 {
7782 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7783 }
7784
7785-#define ATOMIC_OP(op, asm_op) \
7786-static __inline__ void atomic_##op(int a, atomic_t *v) \
7787+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7788+{
7789+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7790+}
7791+
7792+#ifdef CONFIG_PAX_REFCOUNT
7793+#define __REFCOUNT_OP(op) op##o.
7794+#define __OVERFLOW_PRE \
7795+ " mcrxr cr0\n"
7796+#define __OVERFLOW_POST \
7797+ " bf 4*cr0+so, 3f\n" \
7798+ "2: .long 0x00c00b00\n" \
7799+ "3:\n"
7800+#define __OVERFLOW_EXTABLE \
7801+ "\n4:\n"
7802+ _ASM_EXTABLE(2b, 4b)
7803+#else
7804+#define __REFCOUNT_OP(op) op
7805+#define __OVERFLOW_PRE
7806+#define __OVERFLOW_POST
7807+#define __OVERFLOW_EXTABLE
7808+#endif
7809+
7810+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7811+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7812 { \
7813 int t; \
7814 \
7815 __asm__ __volatile__( \
7816-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7817+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7818+ pre_op \
7819 #asm_op " %0,%2,%0\n" \
7820+ post_op \
7821 PPC405_ERR77(0,%3) \
7822 " stwcx. %0,0,%3 \n" \
7823 " bne- 1b\n" \
7824+ extable \
7825 : "=&r" (t), "+m" (v->counter) \
7826 : "r" (a), "r" (&v->counter) \
7827 : "cc"); \
7828 } \
7829
7830-#define ATOMIC_OP_RETURN(op, asm_op) \
7831-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7832+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7833+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7834+
7835+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7836+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7837 { \
7838 int t; \
7839 \
7840 __asm__ __volatile__( \
7841 PPC_ATOMIC_ENTRY_BARRIER \
7842-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7843+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7844+ pre_op \
7845 #asm_op " %0,%1,%0\n" \
7846+ post_op \
7847 PPC405_ERR77(0,%2) \
7848 " stwcx. %0,0,%2 \n" \
7849 " bne- 1b\n" \
7850+ extable \
7851 PPC_ATOMIC_EXIT_BARRIER \
7852 : "=&r" (t) \
7853 : "r" (a), "r" (&v->counter) \
7854@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7855 return t; \
7856 }
7857
7858+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7859+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7860+
7861 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7862
7863 ATOMIC_OPS(add, add)
7864@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7865
7866 #undef ATOMIC_OPS
7867 #undef ATOMIC_OP_RETURN
7868+#undef __ATOMIC_OP_RETURN
7869 #undef ATOMIC_OP
7870+#undef __ATOMIC_OP
7871
7872 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7873
7874-static __inline__ void atomic_inc(atomic_t *v)
7875-{
7876- int t;
7877+/*
7878+ * atomic_inc - increment atomic variable
7879+ * @v: pointer of type atomic_t
7880+ *
7881+ * Atomically increments @v by 1
7882+ */
7883+#define atomic_inc(v) atomic_add(1, (v))
7884+#define atomic_inc_return(v) atomic_add_return(1, (v))
7885
7886- __asm__ __volatile__(
7887-"1: lwarx %0,0,%2 # atomic_inc\n\
7888- addic %0,%0,1\n"
7889- PPC405_ERR77(0,%2)
7890-" stwcx. %0,0,%2 \n\
7891- bne- 1b"
7892- : "=&r" (t), "+m" (v->counter)
7893- : "r" (&v->counter)
7894- : "cc", "xer");
7895+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7896+{
7897+ atomic_add_unchecked(1, v);
7898 }
7899
7900-static __inline__ int atomic_inc_return(atomic_t *v)
7901+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7902 {
7903- int t;
7904-
7905- __asm__ __volatile__(
7906- PPC_ATOMIC_ENTRY_BARRIER
7907-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7908- addic %0,%0,1\n"
7909- PPC405_ERR77(0,%1)
7910-" stwcx. %0,0,%1 \n\
7911- bne- 1b"
7912- PPC_ATOMIC_EXIT_BARRIER
7913- : "=&r" (t)
7914- : "r" (&v->counter)
7915- : "cc", "xer", "memory");
7916-
7917- return t;
7918+ return atomic_add_return_unchecked(1, v);
7919 }
7920
7921 /*
7922@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7923 */
7924 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7925
7926-static __inline__ void atomic_dec(atomic_t *v)
7927+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7928 {
7929- int t;
7930-
7931- __asm__ __volatile__(
7932-"1: lwarx %0,0,%2 # atomic_dec\n\
7933- addic %0,%0,-1\n"
7934- PPC405_ERR77(0,%2)\
7935-" stwcx. %0,0,%2\n\
7936- bne- 1b"
7937- : "=&r" (t), "+m" (v->counter)
7938- : "r" (&v->counter)
7939- : "cc", "xer");
7940+ return atomic_add_return_unchecked(1, v) == 0;
7941 }
7942
7943-static __inline__ int atomic_dec_return(atomic_t *v)
7944+/*
7945+ * atomic_dec - decrement atomic variable
7946+ * @v: pointer of type atomic_t
7947+ *
7948+ * Atomically decrements @v by 1
7949+ */
7950+#define atomic_dec(v) atomic_sub(1, (v))
7951+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7952+
7953+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7954 {
7955- int t;
7956-
7957- __asm__ __volatile__(
7958- PPC_ATOMIC_ENTRY_BARRIER
7959-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7960- addic %0,%0,-1\n"
7961- PPC405_ERR77(0,%1)
7962-" stwcx. %0,0,%1\n\
7963- bne- 1b"
7964- PPC_ATOMIC_EXIT_BARRIER
7965- : "=&r" (t)
7966- : "r" (&v->counter)
7967- : "cc", "xer", "memory");
7968-
7969- return t;
7970+ atomic_sub_unchecked(1, v);
7971 }
7972
7973 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7974 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7975
7976+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7977+{
7978+ return cmpxchg(&(v->counter), old, new);
7979+}
7980+
7981+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7982+{
7983+ return xchg(&(v->counter), new);
7984+}
7985+
7986 /**
7987 * __atomic_add_unless - add unless the number is a given value
7988 * @v: pointer of type atomic_t
7989@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7990 PPC_ATOMIC_ENTRY_BARRIER
7991 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7992 cmpw 0,%0,%3 \n\
7993- beq- 2f \n\
7994- add %0,%2,%0 \n"
7995+ beq- 2f \n"
7996+
7997+#ifdef CONFIG_PAX_REFCOUNT
7998+" mcrxr cr0\n"
7999+" addo. %0,%2,%0\n"
8000+" bf 4*cr0+so, 4f\n"
8001+"3:.long " "0x00c00b00""\n"
8002+"4:\n"
8003+#else
8004+ "add %0,%2,%0 \n"
8005+#endif
8006+
8007 PPC405_ERR77(0,%2)
8008 " stwcx. %0,0,%1 \n\
8009 bne- 1b \n"
8010+"5:"
8011+
8012+#ifdef CONFIG_PAX_REFCOUNT
8013+ _ASM_EXTABLE(3b, 5b)
8014+#endif
8015+
8016 PPC_ATOMIC_EXIT_BARRIER
8017 " subf %0,%2,%0 \n\
8018 2:"
8019@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8020 }
8021 #define atomic_dec_if_positive atomic_dec_if_positive
8022
8023+#define smp_mb__before_atomic_dec() smp_mb()
8024+#define smp_mb__after_atomic_dec() smp_mb()
8025+#define smp_mb__before_atomic_inc() smp_mb()
8026+#define smp_mb__after_atomic_inc() smp_mb()
8027+
8028 #ifdef __powerpc64__
8029
8030 #define ATOMIC64_INIT(i) { (i) }
8031@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8032 return t;
8033 }
8034
8035+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8036+{
8037+ long t;
8038+
8039+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8040+
8041+ return t;
8042+}
8043+
8044 static __inline__ void atomic64_set(atomic64_t *v, long i)
8045 {
8046 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8047 }
8048
8049-#define ATOMIC64_OP(op, asm_op) \
8050-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
8051+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8052+{
8053+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8054+}
8055+
8056+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8057+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8058 { \
8059 long t; \
8060 \
8061 __asm__ __volatile__( \
8062 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8063+ pre_op \
8064 #asm_op " %0,%2,%0\n" \
8065+ post_op \
8066 " stdcx. %0,0,%3 \n" \
8067 " bne- 1b\n" \
8068+ extable \
8069 : "=&r" (t), "+m" (v->counter) \
8070 : "r" (a), "r" (&v->counter) \
8071 : "cc"); \
8072 }
8073
8074-#define ATOMIC64_OP_RETURN(op, asm_op) \
8075-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8076+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8077+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8078+
8079+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8080+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8081 { \
8082 long t; \
8083 \
8084 __asm__ __volatile__( \
8085 PPC_ATOMIC_ENTRY_BARRIER \
8086 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8087+ pre_op \
8088 #asm_op " %0,%1,%0\n" \
8089+ post_op \
8090 " stdcx. %0,0,%2 \n" \
8091 " bne- 1b\n" \
8092+ extable \
8093 PPC_ATOMIC_EXIT_BARRIER \
8094 : "=&r" (t) \
8095 : "r" (a), "r" (&v->counter) \
8096@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8097 return t; \
8098 }
8099
8100+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8101+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8102+
8103 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8104
8105 ATOMIC64_OPS(add, add)
8106@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8107
8108 #undef ATOMIC64_OPS
8109 #undef ATOMIC64_OP_RETURN
8110+#undef __ATOMIC64_OP_RETURN
8111 #undef ATOMIC64_OP
8112+#undef __ATOMIC64_OP
8113+#undef __OVERFLOW_EXTABLE
8114+#undef __OVERFLOW_POST
8115+#undef __OVERFLOW_PRE
8116+#undef __REFCOUNT_OP
8117
8118 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8119
8120-static __inline__ void atomic64_inc(atomic64_t *v)
8121-{
8122- long t;
8123+/*
8124+ * atomic64_inc - increment atomic variable
8125+ * @v: pointer of type atomic64_t
8126+ *
8127+ * Atomically increments @v by 1
8128+ */
8129+#define atomic64_inc(v) atomic64_add(1, (v))
8130+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8131
8132- __asm__ __volatile__(
8133-"1: ldarx %0,0,%2 # atomic64_inc\n\
8134- addic %0,%0,1\n\
8135- stdcx. %0,0,%2 \n\
8136- bne- 1b"
8137- : "=&r" (t), "+m" (v->counter)
8138- : "r" (&v->counter)
8139- : "cc", "xer");
8140+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8141+{
8142+ atomic64_add_unchecked(1, v);
8143 }
8144
8145-static __inline__ long atomic64_inc_return(atomic64_t *v)
8146+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8147 {
8148- long t;
8149-
8150- __asm__ __volatile__(
8151- PPC_ATOMIC_ENTRY_BARRIER
8152-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8153- addic %0,%0,1\n\
8154- stdcx. %0,0,%1 \n\
8155- bne- 1b"
8156- PPC_ATOMIC_EXIT_BARRIER
8157- : "=&r" (t)
8158- : "r" (&v->counter)
8159- : "cc", "xer", "memory");
8160-
8161- return t;
8162+ return atomic64_add_return_unchecked(1, v);
8163 }
8164
8165 /*
8166@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8167 */
8168 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8169
8170-static __inline__ void atomic64_dec(atomic64_t *v)
8171+/*
8172+ * atomic64_dec - decrement atomic variable
8173+ * @v: pointer of type atomic64_t
8174+ *
8175+ * Atomically decrements @v by 1
8176+ */
8177+#define atomic64_dec(v) atomic64_sub(1, (v))
8178+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8179+
8180+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8181 {
8182- long t;
8183-
8184- __asm__ __volatile__(
8185-"1: ldarx %0,0,%2 # atomic64_dec\n\
8186- addic %0,%0,-1\n\
8187- stdcx. %0,0,%2\n\
8188- bne- 1b"
8189- : "=&r" (t), "+m" (v->counter)
8190- : "r" (&v->counter)
8191- : "cc", "xer");
8192-}
8193-
8194-static __inline__ long atomic64_dec_return(atomic64_t *v)
8195-{
8196- long t;
8197-
8198- __asm__ __volatile__(
8199- PPC_ATOMIC_ENTRY_BARRIER
8200-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8201- addic %0,%0,-1\n\
8202- stdcx. %0,0,%1\n\
8203- bne- 1b"
8204- PPC_ATOMIC_EXIT_BARRIER
8205- : "=&r" (t)
8206- : "r" (&v->counter)
8207- : "cc", "xer", "memory");
8208-
8209- return t;
8210+ atomic64_sub_unchecked(1, v);
8211 }
8212
8213 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8214@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8215 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8216 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8217
8218+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8219+{
8220+ return cmpxchg(&(v->counter), old, new);
8221+}
8222+
8223+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8224+{
8225+ return xchg(&(v->counter), new);
8226+}
8227+
8228 /**
8229 * atomic64_add_unless - add unless the number is a given value
8230 * @v: pointer of type atomic64_t
8231@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8232
8233 __asm__ __volatile__ (
8234 PPC_ATOMIC_ENTRY_BARRIER
8235-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8236+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8237 cmpd 0,%0,%3 \n\
8238- beq- 2f \n\
8239- add %0,%2,%0 \n"
8240+ beq- 2f \n"
8241+
8242+#ifdef CONFIG_PAX_REFCOUNT
8243+" mcrxr cr0\n"
8244+" addo. %0,%2,%0\n"
8245+" bf 4*cr0+so, 4f\n"
8246+"3:.long " "0x00c00b00""\n"
8247+"4:\n"
8248+#else
8249+ "add %0,%2,%0 \n"
8250+#endif
8251+
8252 " stdcx. %0,0,%1 \n\
8253 bne- 1b \n"
8254 PPC_ATOMIC_EXIT_BARRIER
8255+"5:"
8256+
8257+#ifdef CONFIG_PAX_REFCOUNT
8258+ _ASM_EXTABLE(3b, 5b)
8259+#endif
8260+
8261 " subf %0,%2,%0 \n\
8262 2:"
8263 : "=&r" (t)
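
The atomic.h rewrite turns each hand-written operation into a parameterized template: __ATOMIC_OP() is expanded twice per operation, once as the regular variant carrying the PAX_REFCOUNT scaffolding (mcrxr to clear XER, the overflow-recording "o." instruction form, a conditional trap word, and an _ASM_EXTABLE entry that resumes past it) and once as the _unchecked variant with the plain instruction. The same generate-two-flavours trick in portable C, with __builtin_add_overflow standing in for addo. plus the trap:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

#define __ATOMIC_OP(op, suffix, checked)                                \
static void atomic_##op##suffix(int a, atomic##suffix##_t *v)          \
{                                                                       \
    int t = 0;                                                          \
    if (checked && __builtin_add_overflow(v->counter, a, &t)) {         \
        fprintf(stderr, "overflow in atomic_" #op #suffix "\n");        \
        abort();        /* kernel: trap, report, resume via fixup */    \
    }                                                                   \
    v->counter = checked ? t                                            \
                         : (int)((unsigned int)v->counter + (unsigned int)a); \
}

#define ATOMIC_OP(op) __ATOMIC_OP(op, , 1) __ATOMIC_OP(op, _unchecked, 0)

ATOMIC_OP(add)      /* emits atomic_add() and atomic_add_unchecked() */

int main(void)
{
    atomic_unchecked_t stat = { 0x7fffffff };
    atomic_add_unchecked(1, &stat);      /* wraps silently */
    printf("stat = %d\n", stat.counter);

    atomic_t ref = { 0x7fffffff };
    atomic_add(1, &ref);                 /* caught: prints and aborts */
    return 0;
}
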
8264diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8265index a3bf5be..e03ba81 100644
8266--- a/arch/powerpc/include/asm/barrier.h
8267+++ b/arch/powerpc/include/asm/barrier.h
8268@@ -76,7 +76,7 @@
8269 do { \
8270 compiletime_assert_atomic_type(*p); \
8271 smp_lwsync(); \
8272- ACCESS_ONCE(*p) = (v); \
8273+ ACCESS_ONCE_RW(*p) = (v); \
8274 } while (0)
8275
8276 #define smp_load_acquire(p) \
8277diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8278index 34a05a1..a1f2c67 100644
8279--- a/arch/powerpc/include/asm/cache.h
8280+++ b/arch/powerpc/include/asm/cache.h
8281@@ -4,6 +4,7 @@
8282 #ifdef __KERNEL__
8283
8284 #include <asm/reg.h>
8285+#include <linux/const.h>
8286
8287 /* bytes per L1 cache line */
8288 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8289@@ -23,7 +24,7 @@
8290 #define L1_CACHE_SHIFT 7
8291 #endif
8292
8293-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8294+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8295
8296 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8297
8298diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8299index 57d289a..b36c98c 100644
8300--- a/arch/powerpc/include/asm/elf.h
8301+++ b/arch/powerpc/include/asm/elf.h
8302@@ -30,6 +30,18 @@
8303
8304 #define ELF_ET_DYN_BASE 0x20000000
8305
8306+#ifdef CONFIG_PAX_ASLR
8307+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8308+
8309+#ifdef __powerpc64__
8310+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8311+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8312+#else
8313+#define PAX_DELTA_MMAP_LEN 15
8314+#define PAX_DELTA_STACK_LEN 15
8315+#endif
8316+#endif
8317+
8318 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8319
8320 /*
8321@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8322 (0x7ff >> (PAGE_SHIFT - 12)) : \
8323 (0x3ffff >> (PAGE_SHIFT - 12)))
8324
8325-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8326-#define arch_randomize_brk arch_randomize_brk
8327-
8328-
8329 #ifdef CONFIG_SPU_BASE
8330 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8331 #define NT_SPU 1
8332diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8333index 8196e9c..d83a9f3 100644
8334--- a/arch/powerpc/include/asm/exec.h
8335+++ b/arch/powerpc/include/asm/exec.h
8336@@ -4,6 +4,6 @@
8337 #ifndef _ASM_POWERPC_EXEC_H
8338 #define _ASM_POWERPC_EXEC_H
8339
8340-extern unsigned long arch_align_stack(unsigned long sp);
8341+#define arch_align_stack(x) ((x) & ~0xfUL)
8342
8343 #endif /* _ASM_POWERPC_EXEC_H */
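
Replacing arch_align_stack() with a pure mask drops the randomized stack-top jitter that the implementation deleted from process.c further down in this patch used to provide (PaX supplies stack randomization through delta_stack instead); the macro simply rounds the stack pointer down to a 16-byte boundary. A one-line illustration:

#include <assert.h>

int main(void)
{
	unsigned long sp = 0x7fffdeadbeefUL;

	/* (x) & ~0xfUL rounds an address down to a 16-byte boundary */
	assert((sp & ~0xfUL) == 0x7fffdeadbee0UL);
	return 0;
}
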
8344diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8345index 5acabbd..7ea14fa 100644
8346--- a/arch/powerpc/include/asm/kmap_types.h
8347+++ b/arch/powerpc/include/asm/kmap_types.h
8348@@ -10,7 +10,7 @@
8349 * 2 of the License, or (at your option) any later version.
8350 */
8351
8352-#define KM_TYPE_NR 16
8353+#define KM_TYPE_NR 17
8354
8355 #endif /* __KERNEL__ */
8356 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8357diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8358index b8da913..c02b593 100644
8359--- a/arch/powerpc/include/asm/local.h
8360+++ b/arch/powerpc/include/asm/local.h
8361@@ -9,21 +9,65 @@ typedef struct
8362 atomic_long_t a;
8363 } local_t;
8364
8365+typedef struct
8366+{
8367+ atomic_long_unchecked_t a;
8368+} local_unchecked_t;
8369+
8370 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8371
8372 #define local_read(l) atomic_long_read(&(l)->a)
8373+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8374 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8375+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8376
8377 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8378+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8379 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8380+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8381 #define local_inc(l) atomic_long_inc(&(l)->a)
8382+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8383 #define local_dec(l) atomic_long_dec(&(l)->a)
8384+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8385
8386 static __inline__ long local_add_return(long a, local_t *l)
8387 {
8388 long t;
8389
8390 __asm__ __volatile__(
8391+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8392+
8393+#ifdef CONFIG_PAX_REFCOUNT
8394+" mcrxr cr0\n"
8395+" addo. %0,%1,%0\n"
8396+" bf 4*cr0+so, 3f\n"
8397+"2:.long " "0x00c00b00""\n"
8398+#else
8399+" add %0,%1,%0\n"
8400+#endif
8401+
8402+"3:\n"
8403+ PPC405_ERR77(0,%2)
8404+ PPC_STLCX "%0,0,%2 \n\
8405+ bne- 1b"
8406+
8407+#ifdef CONFIG_PAX_REFCOUNT
8408+"\n4:\n"
8409+ _ASM_EXTABLE(2b, 4b)
8410+#endif
8411+
8412+ : "=&r" (t)
8413+ : "r" (a), "r" (&(l->a.counter))
8414+ : "cc", "memory");
8415+
8416+ return t;
8417+}
8418+
8419+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8420+{
8421+ long t;
8422+
8423+ __asm__ __volatile__(
8424 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8425 add %0,%1,%0\n"
8426 PPC405_ERR77(0,%2)
8427@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8428
8429 #define local_cmpxchg(l, o, n) \
8430 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8431+#define local_cmpxchg_unchecked(l, o, n) \
8432+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8433 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8434
8435 /**
8436diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8437index 8565c25..2865190 100644
8438--- a/arch/powerpc/include/asm/mman.h
8439+++ b/arch/powerpc/include/asm/mman.h
8440@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8441 }
8442 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8443
8444-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8445+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8446 {
8447 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8448 }
8449diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8450index 69c0598..2c56964 100644
8451--- a/arch/powerpc/include/asm/page.h
8452+++ b/arch/powerpc/include/asm/page.h
8453@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8454 * and needs to be executable. This means the whole heap ends
8455 * up being executable.
8456 */
8457-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8458- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8459+#define VM_DATA_DEFAULT_FLAGS32 \
8460+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8461+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8462
8463 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8464 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8465@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8466 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8467 #endif
8468
8469+#define ktla_ktva(addr) (addr)
8470+#define ktva_ktla(addr) (addr)
8471+
8472 #ifndef CONFIG_PPC_BOOK3S_64
8473 /*
8474 * Use the top bit of the higher-level page table entries to indicate whether
8475diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8476index d908a46..3753f71 100644
8477--- a/arch/powerpc/include/asm/page_64.h
8478+++ b/arch/powerpc/include/asm/page_64.h
8479@@ -172,15 +172,18 @@ do { \
8480 * stack by default, so in the absence of a PT_GNU_STACK program header
8481 * we turn execute permission off.
8482 */
8483-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8484- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8485+#define VM_STACK_DEFAULT_FLAGS32 \
8486+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8487+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8488
8489 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8490 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8491
8492+#ifndef CONFIG_PAX_PAGEEXEC
8493 #define VM_STACK_DEFAULT_FLAGS \
8494 (is_32bit_task() ? \
8495 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8496+#endif
8497
8498 #include <asm-generic/getorder.h>
8499
8500diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8501index 4b0be20..c15a27d 100644
8502--- a/arch/powerpc/include/asm/pgalloc-64.h
8503+++ b/arch/powerpc/include/asm/pgalloc-64.h
8504@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8505 #ifndef CONFIG_PPC_64K_PAGES
8506
8507 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8508+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8509
8510 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8511 {
8512@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8513 pud_set(pud, (unsigned long)pmd);
8514 }
8515
8516+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8517+{
8518+ pud_populate(mm, pud, pmd);
8519+}
8520+
8521 #define pmd_populate(mm, pmd, pte_page) \
8522 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8523 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8524@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8525 #endif
8526
8527 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8528+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8529
8530 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8531 pte_t *pte)
8532diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8533index a8805fe..6d69617 100644
8534--- a/arch/powerpc/include/asm/pgtable.h
8535+++ b/arch/powerpc/include/asm/pgtable.h
8536@@ -2,6 +2,7 @@
8537 #define _ASM_POWERPC_PGTABLE_H
8538 #ifdef __KERNEL__
8539
8540+#include <linux/const.h>
8541 #ifndef __ASSEMBLY__
8542 #include <linux/mmdebug.h>
8543 #include <linux/mmzone.h>
8544diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8545index 4aad413..85d86bf 100644
8546--- a/arch/powerpc/include/asm/pte-hash32.h
8547+++ b/arch/powerpc/include/asm/pte-hash32.h
8548@@ -21,6 +21,7 @@
8549 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8550 #define _PAGE_USER 0x004 /* usermode access allowed */
8551 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8552+#define _PAGE_EXEC _PAGE_GUARDED
8553 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8554 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8555 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8556diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8557index 1c874fb..e8480a4 100644
8558--- a/arch/powerpc/include/asm/reg.h
8559+++ b/arch/powerpc/include/asm/reg.h
8560@@ -253,6 +253,7 @@
8561 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8562 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8563 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8564+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8565 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8566 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8567 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8568diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8569index 5a6614a..d89995d1 100644
8570--- a/arch/powerpc/include/asm/smp.h
8571+++ b/arch/powerpc/include/asm/smp.h
8572@@ -51,7 +51,7 @@ struct smp_ops_t {
8573 int (*cpu_disable)(void);
8574 void (*cpu_die)(unsigned int nr);
8575 int (*cpu_bootable)(unsigned int nr);
8576-};
8577+} __no_const;
8578
8579 extern void smp_send_debugger_break(void);
8580 extern void start_secondary_resume(void);
8581diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8582index 4dbe072..b803275 100644
8583--- a/arch/powerpc/include/asm/spinlock.h
8584+++ b/arch/powerpc/include/asm/spinlock.h
8585@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8586 __asm__ __volatile__(
8587 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8588 __DO_SIGN_EXTEND
8589-" addic. %0,%0,1\n\
8590- ble- 2f\n"
8591+
8592+#ifdef CONFIG_PAX_REFCOUNT
8593+" mcrxr cr0\n"
8594+" addico. %0,%0,1\n"
8595+" bf 4*cr0+so, 3f\n"
8596+"2:.long " "0x00c00b00""\n"
8597+#else
8598+" addic. %0,%0,1\n"
8599+#endif
8600+
8601+"3:\n"
8602+ "ble- 4f\n"
8603 PPC405_ERR77(0,%1)
8604 " stwcx. %0,0,%1\n\
8605 bne- 1b\n"
8606 PPC_ACQUIRE_BARRIER
8607-"2:" : "=&r" (tmp)
8608+"4:"
8609+
8610+#ifdef CONFIG_PAX_REFCOUNT
8611+ _ASM_EXTABLE(2b,4b)
8612+#endif
8613+
8614+ : "=&r" (tmp)
8615 : "r" (&rw->lock)
8616 : "cr0", "xer", "memory");
8617
8618@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8619 __asm__ __volatile__(
8620 "# read_unlock\n\t"
8621 PPC_RELEASE_BARRIER
8622-"1: lwarx %0,0,%1\n\
8623- addic %0,%0,-1\n"
8624+"1: lwarx %0,0,%1\n"
8625+
8626+#ifdef CONFIG_PAX_REFCOUNT
8627+" mcrxr cr0\n"
8628+" addico. %0,%0,-1\n"
8629+" bf 4*cr0+so, 3f\n"
8630+"2:.long " "0x00c00b00""\n"
8631+#else
8632+" addic. %0,%0,-1\n"
8633+#endif
8634+
8635+"3:\n"
8636 PPC405_ERR77(0,%1)
8637 " stwcx. %0,0,%1\n\
8638 bne- 1b"
8639+
8640+#ifdef CONFIG_PAX_REFCOUNT
8641+"\n4:\n"
8642+ _ASM_EXTABLE(2b, 4b)
8643+#endif
8644+
8645 : "=&r"(tmp)
8646 : "r"(&rw->lock)
8647 : "cr0", "xer", "memory");
8648diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8649index 0be6c68..9c3c6ee 100644
8650--- a/arch/powerpc/include/asm/thread_info.h
8651+++ b/arch/powerpc/include/asm/thread_info.h
8652@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8653 #if defined(CONFIG_PPC64)
8654 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8655 #endif
8656+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8657+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8658
8659 /* as above, but as bit values */
8660 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8661@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8662 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8663 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8664 #define _TIF_NOHZ (1<<TIF_NOHZ)
8665+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8666 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8667 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8668- _TIF_NOHZ)
8669+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8670
8671 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8672 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8673diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8674index a0c071d..49cdc7f 100644
8675--- a/arch/powerpc/include/asm/uaccess.h
8676+++ b/arch/powerpc/include/asm/uaccess.h
8677@@ -58,6 +58,7 @@
8678
8679 #endif
8680
8681+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8682 #define access_ok(type, addr, size) \
8683 (__chk_user_ptr(addr), \
8684 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8685@@ -318,52 +319,6 @@ do { \
8686 extern unsigned long __copy_tofrom_user(void __user *to,
8687 const void __user *from, unsigned long size);
8688
8689-#ifndef __powerpc64__
8690-
8691-static inline unsigned long copy_from_user(void *to,
8692- const void __user *from, unsigned long n)
8693-{
8694- unsigned long over;
8695-
8696- if (access_ok(VERIFY_READ, from, n))
8697- return __copy_tofrom_user((__force void __user *)to, from, n);
8698- if ((unsigned long)from < TASK_SIZE) {
8699- over = (unsigned long)from + n - TASK_SIZE;
8700- return __copy_tofrom_user((__force void __user *)to, from,
8701- n - over) + over;
8702- }
8703- return n;
8704-}
8705-
8706-static inline unsigned long copy_to_user(void __user *to,
8707- const void *from, unsigned long n)
8708-{
8709- unsigned long over;
8710-
8711- if (access_ok(VERIFY_WRITE, to, n))
8712- return __copy_tofrom_user(to, (__force void __user *)from, n);
8713- if ((unsigned long)to < TASK_SIZE) {
8714- over = (unsigned long)to + n - TASK_SIZE;
8715- return __copy_tofrom_user(to, (__force void __user *)from,
8716- n - over) + over;
8717- }
8718- return n;
8719-}
8720-
8721-#else /* __powerpc64__ */
8722-
8723-#define __copy_in_user(to, from, size) \
8724- __copy_tofrom_user((to), (from), (size))
8725-
8726-extern unsigned long copy_from_user(void *to, const void __user *from,
8727- unsigned long n);
8728-extern unsigned long copy_to_user(void __user *to, const void *from,
8729- unsigned long n);
8730-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8731- unsigned long n);
8732-
8733-#endif /* __powerpc64__ */
8734-
8735 static inline unsigned long __copy_from_user_inatomic(void *to,
8736 const void __user *from, unsigned long n)
8737 {
8738@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8739 if (ret == 0)
8740 return 0;
8741 }
8742+
8743+ if (!__builtin_constant_p(n))
8744+ check_object_size(to, n, false);
8745+
8746 return __copy_tofrom_user((__force void __user *)to, from, n);
8747 }
8748
8749@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8750 if (ret == 0)
8751 return 0;
8752 }
8753+
8754+ if (!__builtin_constant_p(n))
8755+ check_object_size(from, n, true);
8756+
8757 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8758 }
8759
8760@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8761 return __copy_to_user_inatomic(to, from, size);
8762 }
8763
8764+#ifndef __powerpc64__
8765+
8766+static inline unsigned long __must_check copy_from_user(void *to,
8767+ const void __user *from, unsigned long n)
8768+{
8769+ unsigned long over;
8770+
8771+ if ((long)n < 0)
8772+ return n;
8773+
8774+ if (access_ok(VERIFY_READ, from, n)) {
8775+ if (!__builtin_constant_p(n))
8776+ check_object_size(to, n, false);
8777+ return __copy_tofrom_user((__force void __user *)to, from, n);
8778+ }
8779+ if ((unsigned long)from < TASK_SIZE) {
8780+ over = (unsigned long)from + n - TASK_SIZE;
8781+ if (!__builtin_constant_p(n - over))
8782+ check_object_size(to, n - over, false);
8783+ return __copy_tofrom_user((__force void __user *)to, from,
8784+ n - over) + over;
8785+ }
8786+ return n;
8787+}
8788+
8789+static inline unsigned long __must_check copy_to_user(void __user *to,
8790+ const void *from, unsigned long n)
8791+{
8792+ unsigned long over;
8793+
8794+ if ((long)n < 0)
8795+ return n;
8796+
8797+ if (access_ok(VERIFY_WRITE, to, n)) {
8798+ if (!__builtin_constant_p(n))
8799+ check_object_size(from, n, true);
8800+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8801+ }
8802+ if ((unsigned long)to < TASK_SIZE) {
8803+ over = (unsigned long)to + n - TASK_SIZE;
8804+		if (!__builtin_constant_p(n - over))
8805+ check_object_size(from, n - over, true);
8806+ return __copy_tofrom_user(to, (__force void __user *)from,
8807+ n - over) + over;
8808+ }
8809+ return n;
8810+}
8811+
8812+#else /* __powerpc64__ */
8813+
8814+#define __copy_in_user(to, from, size) \
8815+ __copy_tofrom_user((to), (from), (size))
8816+
8817+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8818+{
8819+ if ((long)n < 0 || n > INT_MAX)
8820+ return n;
8821+
8822+ if (!__builtin_constant_p(n))
8823+ check_object_size(to, n, false);
8824+
8825+ if (likely(access_ok(VERIFY_READ, from, n)))
8826+ n = __copy_from_user(to, from, n);
8827+ else
8828+ memset(to, 0, n);
8829+ return n;
8830+}
8831+
8832+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8833+{
8834+ if ((long)n < 0 || n > INT_MAX)
8835+ return n;
8836+
8837+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8838+ if (!__builtin_constant_p(n))
8839+ check_object_size(from, n, true);
8840+ n = __copy_to_user(to, from, n);
8841+ }
8842+ return n;
8843+}
8844+
8845+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8846+ unsigned long n);
8847+
8848+#endif /* __powerpc64__ */
8849+
8850 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8851
8852 static inline unsigned long clear_user(void __user *addr, unsigned long size)
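
The reworked copy_from_user()/copy_to_user() above move from lib/usercopy_64.c into the header (the out-of-line copies are deleted further down in this patch) and add two guards before delegating to __copy_tofrom_user(): a sign check that rejects lengths whose high bit is set, the usual signature of a subtraction underflow in the caller, and check_object_size(), which verifies that a runtime-sized copy stays within the bounds of the kernel object involved. A rough user-space sketch of that guard shape, where bounds_of() is a made-up stand-in for the slab/section lookup check_object_size() performs:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's object-size lookup; pretend
 * every destination object is 64 bytes for demonstration purposes. */
static size_t bounds_of(const void *obj)
{
	(void)obj;
	return 64;
}

/* Sketch of the hardened copy shape: returns the number of bytes NOT
 * copied, matching the copy_from_user() convention. */
static size_t checked_copy(void *dst, const void *src, size_t n)
{
	if ((long)n < 0)		/* underflowed length: refuse */
		return n;
	if (!__builtin_constant_p(n) && n > bounds_of(dst))
		return n;		/* would overrun the destination */
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	char dst[64], src[128] = "hello";

	printf("%zu\n", checked_copy(dst, src, 6));	/* 0: copy accepted */
	printf("%zu\n", checked_copy(dst, src, 128));	/* 128: rejected */
	return 0;
}
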
8853diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8854index 502cf69..53936a1 100644
8855--- a/arch/powerpc/kernel/Makefile
8856+++ b/arch/powerpc/kernel/Makefile
8857@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8858 CFLAGS_btext.o += -fPIC
8859 endif
8860
8861+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8862+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8863+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8864+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8865+
8866 ifdef CONFIG_FUNCTION_TRACER
8867 # Do not trace early boot code
8868 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8869@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8870 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8871 endif
8872
8873+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8874+
8875 obj-y := cputable.o ptrace.o syscalls.o \
8876 irq.o align.o signal_32.o pmc.o vdso.o \
8877 process.o systbl.o idle.o \
8878diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8879index 3e68d1c..72a5ee6 100644
8880--- a/arch/powerpc/kernel/exceptions-64e.S
8881+++ b/arch/powerpc/kernel/exceptions-64e.S
8882@@ -1010,6 +1010,7 @@ storage_fault_common:
8883 std r14,_DAR(r1)
8884 std r15,_DSISR(r1)
8885 addi r3,r1,STACK_FRAME_OVERHEAD
8886+ bl save_nvgprs
8887 mr r4,r14
8888 mr r5,r15
8889 ld r14,PACA_EXGEN+EX_R14(r13)
8890@@ -1018,8 +1019,7 @@ storage_fault_common:
8891 cmpdi r3,0
8892 bne- 1f
8893 b ret_from_except_lite
8894-1: bl save_nvgprs
8895- mr r5,r3
8896+1: mr r5,r3
8897 addi r3,r1,STACK_FRAME_OVERHEAD
8898 ld r4,_DAR(r1)
8899 bl bad_page_fault
8900diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8901index c2df815..bae3d12 100644
8902--- a/arch/powerpc/kernel/exceptions-64s.S
8903+++ b/arch/powerpc/kernel/exceptions-64s.S
8904@@ -1599,10 +1599,10 @@ handle_page_fault:
8905 11: ld r4,_DAR(r1)
8906 ld r5,_DSISR(r1)
8907 addi r3,r1,STACK_FRAME_OVERHEAD
8908+ bl save_nvgprs
8909 bl do_page_fault
8910 cmpdi r3,0
8911 beq+ 12f
8912- bl save_nvgprs
8913 mr r5,r3
8914 addi r3,r1,STACK_FRAME_OVERHEAD
8915 lwz r4,_DAR(r1)
8916diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8917index 4509603..cdb491f 100644
8918--- a/arch/powerpc/kernel/irq.c
8919+++ b/arch/powerpc/kernel/irq.c
8920@@ -460,6 +460,8 @@ void migrate_irqs(void)
8921 }
8922 #endif
8923
8924+extern void gr_handle_kernel_exploit(void);
8925+
8926 static inline void check_stack_overflow(void)
8927 {
8928 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8929@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8930 pr_err("do_IRQ: stack overflow: %ld\n",
8931 sp - sizeof(struct thread_info));
8932 dump_stack();
8933+ gr_handle_kernel_exploit();
8934 }
8935 #endif
8936 }
8937diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8938index c94d2e0..992a9ce 100644
8939--- a/arch/powerpc/kernel/module_32.c
8940+++ b/arch/powerpc/kernel/module_32.c
8941@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8942 me->arch.core_plt_section = i;
8943 }
8944 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8945- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8946+		pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8947 return -ENOEXEC;
8948 }
8949
8950@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8951
8952 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8953 /* Init, or core PLT? */
8954- if (location >= mod->module_core
8955- && location < mod->module_core + mod->core_size)
8956+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8957+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8958 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8959- else
8960+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8961+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8962 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8963+ else {
8964+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8965+ return ~0UL;
8966+ }
8967
8968 /* Find this entry, or if that fails, the next avail. entry */
8969 while (entry->jump[0]) {
8970@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8971 }
8972 #ifdef CONFIG_DYNAMIC_FTRACE
8973 module->arch.tramp =
8974- do_plt_call(module->module_core,
8975+ do_plt_call(module->module_core_rx,
8976 (unsigned long)ftrace_caller,
8977 sechdrs, module);
8978 #endif
8979diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8980index b4cc7be..1fe8bb3 100644
8981--- a/arch/powerpc/kernel/process.c
8982+++ b/arch/powerpc/kernel/process.c
8983@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8984 * Lookup NIP late so we have the best change of getting the
8985 * above info out without failing
8986 */
8987- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8988- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8989+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8990+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8991 #endif
8992 show_stack(current, (unsigned long *) regs->gpr[1]);
8993 if (!user_mode(regs))
8994@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8995 newsp = stack[0];
8996 ip = stack[STACK_FRAME_LR_SAVE];
8997 if (!firstframe || ip != lr) {
8998- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8999+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9000 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9001 if ((ip == rth) && curr_frame >= 0) {
9002- printk(" (%pS)",
9003+ printk(" (%pA)",
9004 (void *)current->ret_stack[curr_frame].ret);
9005 curr_frame--;
9006 }
9007@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9008 struct pt_regs *regs = (struct pt_regs *)
9009 (sp + STACK_FRAME_OVERHEAD);
9010 lr = regs->link;
9011- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9012+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9013 regs->trap, (void *)regs->nip, (void *)lr);
9014 firstframe = 1;
9015 }
9016@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
9017 mtspr(SPRN_CTRLT, ctrl);
9018 }
9019 #endif /* CONFIG_PPC64 */
9020-
9021-unsigned long arch_align_stack(unsigned long sp)
9022-{
9023- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9024- sp -= get_random_int() & ~PAGE_MASK;
9025- return sp & ~0xf;
9026-}
9027-
9028-static inline unsigned long brk_rnd(void)
9029-{
9030- unsigned long rnd = 0;
9031-
9032- /* 8MB for 32bit, 1GB for 64bit */
9033- if (is_32bit_task())
9034- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9035- else
9036- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9037-
9038- return rnd << PAGE_SHIFT;
9039-}
9040-
9041-unsigned long arch_randomize_brk(struct mm_struct *mm)
9042-{
9043- unsigned long base = mm->brk;
9044- unsigned long ret;
9045-
9046-#ifdef CONFIG_PPC_STD_MMU_64
9047- /*
9048- * If we are using 1TB segments and we are allowed to randomise
9049- * the heap, we can put it above 1TB so it is backed by a 1TB
9050- * segment. Otherwise the heap will be in the bottom 1TB
9051- * which always uses 256MB segments and this may result in a
9052- * performance penalty.
9053- */
9054- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9055- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9056-#endif
9057-
9058- ret = PAGE_ALIGN(base + brk_rnd());
9059-
9060- if (ret < mm->brk)
9061- return mm->brk;
9062-
9063- return ret;
9064-}
9065-
9066diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9067index f21897b..28c0428 100644
9068--- a/arch/powerpc/kernel/ptrace.c
9069+++ b/arch/powerpc/kernel/ptrace.c
9070@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9071 return ret;
9072 }
9073
9074+#ifdef CONFIG_GRKERNSEC_SETXID
9075+extern void gr_delayed_cred_worker(void);
9076+#endif
9077+
9078 /*
9079 * We must return the syscall number to actually look up in the table.
9080 * This can be -1L to skip running any syscall at all.
9081@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9082
9083 secure_computing_strict(regs->gpr[0]);
9084
9085+#ifdef CONFIG_GRKERNSEC_SETXID
9086+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9087+ gr_delayed_cred_worker();
9088+#endif
9089+
9090 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9091 tracehook_report_syscall_entry(regs))
9092 /*
9093@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9094 {
9095 int step;
9096
9097+#ifdef CONFIG_GRKERNSEC_SETXID
9098+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9099+ gr_delayed_cred_worker();
9100+#endif
9101+
9102 audit_syscall_exit(regs);
9103
9104 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9105diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9106index b171001..4ac7ac5 100644
9107--- a/arch/powerpc/kernel/signal_32.c
9108+++ b/arch/powerpc/kernel/signal_32.c
9109@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9110 /* Save user registers on the stack */
9111 frame = &rt_sf->uc.uc_mcontext;
9112 addr = frame;
9113- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9114+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9115 sigret = 0;
9116 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9117 } else {
9118diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9119index 2cb0c94..c0c0bc9 100644
9120--- a/arch/powerpc/kernel/signal_64.c
9121+++ b/arch/powerpc/kernel/signal_64.c
9122@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9123 current->thread.fp_state.fpscr = 0;
9124
9125 /* Set up to return from userspace. */
9126- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9127+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9128 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9129 } else {
9130 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9131diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9132index e6595b7..24bde6e 100644
9133--- a/arch/powerpc/kernel/traps.c
9134+++ b/arch/powerpc/kernel/traps.c
9135@@ -36,6 +36,7 @@
9136 #include <linux/debugfs.h>
9137 #include <linux/ratelimit.h>
9138 #include <linux/context_tracking.h>
9139+#include <linux/uaccess.h>
9140
9141 #include <asm/emulated_ops.h>
9142 #include <asm/pgtable.h>
9143@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9144 return flags;
9145 }
9146
9147+extern void gr_handle_kernel_exploit(void);
9148+
9149 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9150 int signr)
9151 {
9152@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9153 panic("Fatal exception in interrupt");
9154 if (panic_on_oops)
9155 panic("Fatal exception");
9156+
9157+ gr_handle_kernel_exploit();
9158+
9159 do_exit(signr);
9160 }
9161
9162@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9163 enum ctx_state prev_state = exception_enter();
9164 unsigned int reason = get_reason(regs);
9165
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+ unsigned int bkpt;
9168+ const struct exception_table_entry *entry;
9169+
9170+ if (reason & REASON_ILLEGAL) {
9171+ /* Check if PaX bad instruction */
9172+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9173+ current->thread.trap_nr = 0;
9174+ pax_report_refcount_overflow(regs);
9175+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9176+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9177+ regs->nip = entry->fixup;
9178+ return;
9179+ }
9180+ /* fixup_exception() could not handle */
9181+ goto bail;
9182+ }
9183+ }
9184+#endif
9185+
9186 /* We can now get here via a FP Unavailable exception if the core
9187 * has no FPU, in that case the reason flags will be 0 */
9188
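
program_check_exception() above is the consumer of the 0x00c00b00 trap word planted by the REFCOUNT asm: probe_kernel_address() fetches the faulting instruction, and on a match the handler reports the overflow and resumes at the fixup address recorded by _ASM_EXTABLE, since powerpc has no fixup_exception() helper to call. A sketch of that lookup, assuming a plain sorted array in place of the kernel's search_exception_tables():

#include <stddef.h>

struct extable_entry {
	unsigned long insn;	/* address of the trapping instruction */
	unsigned long fixup;	/* address to resume at */
};

/* Binary search over a sorted table, the same scheme the kernel uses;
 * returns 0 when no entry covers the faulting address (a fatal oops). */
static unsigned long find_fixup(const struct extable_entry *tbl, size_t n,
				unsigned long faulting_pc)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == faulting_pc)
			return tbl[mid].fixup;
		if (tbl[mid].insn < faulting_pc)
			lo = mid + 1;
		else
			hi = mid;
	}
	return 0;
}
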
9189diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9190index 305eb0d..accc5b40 100644
9191--- a/arch/powerpc/kernel/vdso.c
9192+++ b/arch/powerpc/kernel/vdso.c
9193@@ -34,6 +34,7 @@
9194 #include <asm/vdso.h>
9195 #include <asm/vdso_datapage.h>
9196 #include <asm/setup.h>
9197+#include <asm/mman.h>
9198
9199 #undef DEBUG
9200
9201@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9202 vdso_base = VDSO32_MBASE;
9203 #endif
9204
9205- current->mm->context.vdso_base = 0;
9206+ current->mm->context.vdso_base = ~0UL;
9207
9208 /* vDSO has a problem and was disabled, just don't "enable" it for the
9209 * process
9210@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9211 vdso_base = get_unmapped_area(NULL, vdso_base,
9212 (vdso_pages << PAGE_SHIFT) +
9213 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9214- 0, 0);
9215+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9216 if (IS_ERR_VALUE(vdso_base)) {
9217 rc = vdso_base;
9218 goto fail_mmapsem;
9219diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9220index c45eaab..5f41b57 100644
9221--- a/arch/powerpc/kvm/powerpc.c
9222+++ b/arch/powerpc/kvm/powerpc.c
9223@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9224 }
9225 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9226
9227-int kvm_arch_init(void *opaque)
9228+int kvm_arch_init(const void *opaque)
9229 {
9230 return 0;
9231 }
9232diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9233index 5eea6f3..5d10396 100644
9234--- a/arch/powerpc/lib/usercopy_64.c
9235+++ b/arch/powerpc/lib/usercopy_64.c
9236@@ -9,22 +9,6 @@
9237 #include <linux/module.h>
9238 #include <asm/uaccess.h>
9239
9240-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9241-{
9242- if (likely(access_ok(VERIFY_READ, from, n)))
9243- n = __copy_from_user(to, from, n);
9244- else
9245- memset(to, 0, n);
9246- return n;
9247-}
9248-
9249-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9250-{
9251- if (likely(access_ok(VERIFY_WRITE, to, n)))
9252- n = __copy_to_user(to, from, n);
9253- return n;
9254-}
9255-
9256 unsigned long copy_in_user(void __user *to, const void __user *from,
9257 unsigned long n)
9258 {
9259@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9260 return n;
9261 }
9262
9263-EXPORT_SYMBOL(copy_from_user);
9264-EXPORT_SYMBOL(copy_to_user);
9265 EXPORT_SYMBOL(copy_in_user);
9266
9267diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9268index 6154b0a..4de2b19 100644
9269--- a/arch/powerpc/mm/fault.c
9270+++ b/arch/powerpc/mm/fault.c
9271@@ -33,6 +33,10 @@
9272 #include <linux/ratelimit.h>
9273 #include <linux/context_tracking.h>
9274 #include <linux/hugetlb.h>
9275+#include <linux/slab.h>
9276+#include <linux/pagemap.h>
9277+#include <linux/compiler.h>
9278+#include <linux/unistd.h>
9279
9280 #include <asm/firmware.h>
9281 #include <asm/page.h>
9282@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9283 }
9284 #endif
9285
9286+#ifdef CONFIG_PAX_PAGEEXEC
9287+/*
9288+ * PaX: decide what to do with offenders (regs->nip = fault address)
9289+ *
9290+ * returns 1 when task should be killed
9291+ */
9292+static int pax_handle_fetch_fault(struct pt_regs *regs)
9293+{
9294+ return 1;
9295+}
9296+
9297+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9298+{
9299+ unsigned long i;
9300+
9301+ printk(KERN_ERR "PAX: bytes at PC: ");
9302+ for (i = 0; i < 5; i++) {
9303+ unsigned int c;
9304+ if (get_user(c, (unsigned int __user *)pc+i))
9305+ printk(KERN_CONT "???????? ");
9306+ else
9307+ printk(KERN_CONT "%08x ", c);
9308+ }
9309+ printk("\n");
9310+}
9311+#endif
9312+
9313 /*
9314 * Check whether the instruction at regs->nip is a store using
9315 * an update addressing form which will update r1.
9316@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9317 * indicate errors in DSISR but can validly be set in SRR1.
9318 */
9319 if (trap == 0x400)
9320- error_code &= 0x48200000;
9321+ error_code &= 0x58200000;
9322 else
9323 is_write = error_code & DSISR_ISSTORE;
9324 #else
9325@@ -383,7 +414,7 @@ good_area:
9326 * "undefined". Of those that can be set, this is the only
9327 * one which seems bad.
9328 */
9329- if (error_code & 0x10000000)
9330+ if (error_code & DSISR_GUARDED)
9331 /* Guarded storage error. */
9332 goto bad_area;
9333 #endif /* CONFIG_8xx */
9334@@ -398,7 +429,7 @@ good_area:
9335 * processors use the same I/D cache coherency mechanism
9336 * as embedded.
9337 */
9338- if (error_code & DSISR_PROTFAULT)
9339+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9340 goto bad_area;
9341 #endif /* CONFIG_PPC_STD_MMU */
9342
9343@@ -490,6 +521,23 @@ bad_area:
9344 bad_area_nosemaphore:
9345 /* User mode accesses cause a SIGSEGV */
9346 if (user_mode(regs)) {
9347+
9348+#ifdef CONFIG_PAX_PAGEEXEC
9349+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9350+#ifdef CONFIG_PPC_STD_MMU
9351+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9352+#else
9353+ if (is_exec && regs->nip == address) {
9354+#endif
9355+ switch (pax_handle_fetch_fault(regs)) {
9356+ }
9357+
9358+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9359+ do_group_exit(SIGKILL);
9360+ }
9361+ }
9362+#endif
9363+
9364 _exception(SIGSEGV, regs, code, address);
9365 goto bail;
9366 }
9367diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9368index cb8bdbe..cde4bc7 100644
9369--- a/arch/powerpc/mm/mmap.c
9370+++ b/arch/powerpc/mm/mmap.c
9371@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9372 return sysctl_legacy_va_layout;
9373 }
9374
9375-static unsigned long mmap_rnd(void)
9376+static unsigned long mmap_rnd(struct mm_struct *mm)
9377 {
9378 unsigned long rnd = 0;
9379
9380+#ifdef CONFIG_PAX_RANDMMAP
9381+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9382+#endif
9383+
9384 if (current->flags & PF_RANDOMIZE) {
9385 /* 8MB for 32bit, 1GB for 64bit */
9386 if (is_32bit_task())
9387@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9388 return rnd << PAGE_SHIFT;
9389 }
9390
9391-static inline unsigned long mmap_base(void)
9392+static inline unsigned long mmap_base(struct mm_struct *mm)
9393 {
9394 unsigned long gap = rlimit(RLIMIT_STACK);
9395
9396@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9397 else if (gap > MAX_GAP)
9398 gap = MAX_GAP;
9399
9400- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9401+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9402 }
9403
9404 /*
9405@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9406 */
9407 if (mmap_is_legacy()) {
9408 mm->mmap_base = TASK_UNMAPPED_BASE;
9409+
9410+#ifdef CONFIG_PAX_RANDMMAP
9411+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9412+ mm->mmap_base += mm->delta_mmap;
9413+#endif
9414+
9415 mm->get_unmapped_area = arch_get_unmapped_area;
9416 } else {
9417- mm->mmap_base = mmap_base();
9418+ mm->mmap_base = mmap_base(mm);
9419+
9420+#ifdef CONFIG_PAX_RANDMMAP
9421+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9422+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9423+#endif
9424+
9425 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9426 }
9427 }
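
Both layout branches above apply the same PAX_RANDMMAP adjustment: the bottom-up base is shifted up by a per-mm random delta, while the top-down base is shifted down by the mmap and stack deltas combined, so randomized stacks keep their full gap. A sketch of that base arithmetic on an assumed LP64 target, with rand_delta() as a stand-in for the page-aligned deltas PaX draws into mm->delta_mmap/delta_stack at exec time:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		12
#define PAX_DELTA_MMAP_LEN	28	/* ppc64 value from the elf.h hunk */
#define PAX_DELTA_STACK_LEN	28

/* Assumed helper: a random page-aligned offset of the given bit width. */
static unsigned long rand_delta(unsigned int bits)
{
	return ((unsigned long)rand() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long task_unmapped_base = 0x100000000UL;	/* illustrative */
	unsigned long topdown_base = 0x7ffff0000000UL;		/* illustrative */
	unsigned long delta_mmap = rand_delta(PAX_DELTA_MMAP_LEN);
	unsigned long delta_stack = rand_delta(PAX_DELTA_STACK_LEN);

	/* bottom-up layout: shift the search base up */
	printf("legacy base:  %#lx\n", task_unmapped_base + delta_mmap);
	/* top-down layout: shift the base down, preserving the stack gap */
	printf("topdown base: %#lx\n", topdown_base - delta_mmap - delta_stack);
	return 0;
}
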
9428diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9429index ded0ea1..f213a9b 100644
9430--- a/arch/powerpc/mm/slice.c
9431+++ b/arch/powerpc/mm/slice.c
9432@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9433 if ((mm->task_size - len) < addr)
9434 return 0;
9435 vma = find_vma(mm, addr);
9436- return (!vma || (addr + len) <= vma->vm_start);
9437+ return check_heap_stack_gap(vma, addr, len, 0);
9438 }
9439
9440 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9441@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9442 info.align_offset = 0;
9443
9444 addr = TASK_UNMAPPED_BASE;
9445+
9446+#ifdef CONFIG_PAX_RANDMMAP
9447+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9448+ addr += mm->delta_mmap;
9449+#endif
9450+
9451 while (addr < TASK_SIZE) {
9452 info.low_limit = addr;
9453 if (!slice_scan_available(addr, available, 1, &addr))
9454@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9455 if (fixed && addr > (mm->task_size - len))
9456 return -ENOMEM;
9457
9458+#ifdef CONFIG_PAX_RANDMMAP
9459+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9460+ addr = 0;
9461+#endif
9462+
9463 /* If hint, make sure it matches our alignment restrictions */
9464 if (!fixed && addr) {
9465 addr = _ALIGN_UP(addr, 1ul << pshift);
9466diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9467index f223875..94170e4 100644
9468--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9469+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9470@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9471 }
9472
9473 static struct pci_ops scc_pciex_pci_ops = {
9474- scc_pciex_read_config,
9475- scc_pciex_write_config,
9476+ .read = scc_pciex_read_config,
9477+ .write = scc_pciex_write_config,
9478 };
9479
9480 static void pciex_clear_intr_all(unsigned int __iomem *base)
9481diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9482index d966bbe..372124a 100644
9483--- a/arch/powerpc/platforms/cell/spufs/file.c
9484+++ b/arch/powerpc/platforms/cell/spufs/file.c
9485@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9486 return VM_FAULT_NOPAGE;
9487 }
9488
9489-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9490+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9491 unsigned long address,
9492- void *buf, int len, int write)
9493+ void *buf, size_t len, int write)
9494 {
9495 struct spu_context *ctx = vma->vm_file->private_data;
9496 unsigned long offset = address - vma->vm_start;
9497diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9498index fa934fe..c296056 100644
9499--- a/arch/s390/include/asm/atomic.h
9500+++ b/arch/s390/include/asm/atomic.h
9501@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9502 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9503 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9504
9505+#define atomic64_read_unchecked(v) atomic64_read(v)
9506+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9507+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9508+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9509+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9510+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9511+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9512+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9513+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9514+
9515 #endif /* __ARCH_S390_ATOMIC__ */
9516diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9517index 8d72471..5322500 100644
9518--- a/arch/s390/include/asm/barrier.h
9519+++ b/arch/s390/include/asm/barrier.h
9520@@ -42,7 +42,7 @@
9521 do { \
9522 compiletime_assert_atomic_type(*p); \
9523 barrier(); \
9524- ACCESS_ONCE(*p) = (v); \
9525+ ACCESS_ONCE_RW(*p) = (v); \
9526 } while (0)
9527
9528 #define smp_load_acquire(p) \
9529diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9530index 4d7ccac..d03d0ad 100644
9531--- a/arch/s390/include/asm/cache.h
9532+++ b/arch/s390/include/asm/cache.h
9533@@ -9,8 +9,10 @@
9534 #ifndef __ARCH_S390_CACHE_H
9535 #define __ARCH_S390_CACHE_H
9536
9537-#define L1_CACHE_BYTES 256
9538+#include <linux/const.h>
9539+
9540 #define L1_CACHE_SHIFT 8
9541+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9542 #define NET_SKB_PAD 32
9543
9544 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9545diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9546index f6e43d3..5f57681 100644
9547--- a/arch/s390/include/asm/elf.h
9548+++ b/arch/s390/include/asm/elf.h
9549@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9550 the loader. We need to make sure that it is out of the way of the program
9551 that it will "exec", and that there is sufficient room for the brk. */
9552
9553-extern unsigned long randomize_et_dyn(unsigned long base);
9554-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9555+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9556+
9557+#ifdef CONFIG_PAX_ASLR
9558+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9559+
9560+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9561+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9562+#endif
9563
9564 /* This yields a mask that user programs can use to figure out what
9565 instruction set this CPU supports. */
9566@@ -223,9 +229,6 @@ struct linux_binprm;
9567 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9568 int arch_setup_additional_pages(struct linux_binprm *, int);
9569
9570-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9571-#define arch_randomize_brk arch_randomize_brk
9572-
9573 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9574
9575 #endif
9576diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9577index c4a93d6..4d2a9b4 100644
9578--- a/arch/s390/include/asm/exec.h
9579+++ b/arch/s390/include/asm/exec.h
9580@@ -7,6 +7,6 @@
9581 #ifndef __ASM_EXEC_H
9582 #define __ASM_EXEC_H
9583
9584-extern unsigned long arch_align_stack(unsigned long sp);
9585+#define arch_align_stack(x) ((x) & ~0xfUL)
9586
9587 #endif /* __ASM_EXEC_H */
9588diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9589index cd4c68e..6764641 100644
9590--- a/arch/s390/include/asm/uaccess.h
9591+++ b/arch/s390/include/asm/uaccess.h
9592@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9593 __range_ok((unsigned long)(addr), (size)); \
9594 })
9595
9596+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9597 #define access_ok(type, addr, size) __access_ok(addr, size)
9598
9599 /*
9600@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9601 copy_to_user(void __user *to, const void *from, unsigned long n)
9602 {
9603 might_fault();
9604+
9605+ if ((long)n < 0)
9606+ return n;
9607+
9608 return __copy_to_user(to, from, n);
9609 }
9610
9611@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9612 static inline unsigned long __must_check
9613 copy_from_user(void *to, const void __user *from, unsigned long n)
9614 {
9615- unsigned int sz = __compiletime_object_size(to);
9616+ size_t sz = __compiletime_object_size(to);
9617
9618 might_fault();
9619- if (unlikely(sz != -1 && sz < n)) {
9620+
9621+ if ((long)n < 0)
9622+ return n;
9623+
9624+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9625 copy_from_user_overflow();
9626 return n;
9627 }
9628diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9629index 409d152..d90d368 100644
9630--- a/arch/s390/kernel/module.c
9631+++ b/arch/s390/kernel/module.c
9632@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9633
9634 /* Increase core size by size of got & plt and set start
9635 offsets for got and plt. */
9636- me->core_size = ALIGN(me->core_size, 4);
9637- me->arch.got_offset = me->core_size;
9638- me->core_size += me->arch.got_size;
9639- me->arch.plt_offset = me->core_size;
9640- me->core_size += me->arch.plt_size;
9641+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9642+ me->arch.got_offset = me->core_size_rw;
9643+ me->core_size_rw += me->arch.got_size;
9644+ me->arch.plt_offset = me->core_size_rx;
9645+ me->core_size_rx += me->arch.plt_size;
9646 return 0;
9647 }
9648
9649@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9650 if (info->got_initialized == 0) {
9651 Elf_Addr *gotent;
9652
9653- gotent = me->module_core + me->arch.got_offset +
9654+ gotent = me->module_core_rw + me->arch.got_offset +
9655 info->got_offset;
9656 *gotent = val;
9657 info->got_initialized = 1;
9658@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9659 rc = apply_rela_bits(loc, val, 0, 64, 0);
9660 else if (r_type == R_390_GOTENT ||
9661 r_type == R_390_GOTPLTENT) {
9662- val += (Elf_Addr) me->module_core - loc;
9663+ val += (Elf_Addr) me->module_core_rw - loc;
9664 rc = apply_rela_bits(loc, val, 1, 32, 1);
9665 }
9666 break;
9667@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9668 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9669 if (info->plt_initialized == 0) {
9670 unsigned int *ip;
9671- ip = me->module_core + me->arch.plt_offset +
9672+ ip = me->module_core_rx + me->arch.plt_offset +
9673 info->plt_offset;
9674 #ifndef CONFIG_64BIT
9675 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9676@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9677 val - loc + 0xffffUL < 0x1ffffeUL) ||
9678 (r_type == R_390_PLT32DBL &&
9679 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9680- val = (Elf_Addr) me->module_core +
9681+ val = (Elf_Addr) me->module_core_rx +
9682 me->arch.plt_offset +
9683 info->plt_offset;
9684 val += rela->r_addend - loc;
9685@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9686 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9687 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9688 val = val + rela->r_addend -
9689- ((Elf_Addr) me->module_core + me->arch.got_offset);
9690+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9691 if (r_type == R_390_GOTOFF16)
9692 rc = apply_rela_bits(loc, val, 0, 16, 0);
9693 else if (r_type == R_390_GOTOFF32)
9694@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9695 break;
9696 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9697 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9698- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9699+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9700 rela->r_addend - loc;
9701 if (r_type == R_390_GOTPC)
9702 rc = apply_rela_bits(loc, val, 1, 32, 0);
9703diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9704index aa7a839..6c2a916 100644
9705--- a/arch/s390/kernel/process.c
9706+++ b/arch/s390/kernel/process.c
9707@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9708 }
9709 return 0;
9710 }
9711-
9712-unsigned long arch_align_stack(unsigned long sp)
9713-{
9714- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9715- sp -= get_random_int() & ~PAGE_MASK;
9716- return sp & ~0xf;
9717-}
9718-
9719-static inline unsigned long brk_rnd(void)
9720-{
9721- /* 8MB for 32bit, 1GB for 64bit */
9722- if (is_32bit_task())
9723- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9724- else
9725- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9726-}
9727-
9728-unsigned long arch_randomize_brk(struct mm_struct *mm)
9729-{
9730- unsigned long ret;
9731-
9732- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9733- return (ret > mm->brk) ? ret : mm->brk;
9734-}
9735-
9736-unsigned long randomize_et_dyn(unsigned long base)
9737-{
9738- unsigned long ret;
9739-
9740- if (!(current->flags & PF_RANDOMIZE))
9741- return base;
9742- ret = PAGE_ALIGN(base + brk_rnd());
9743- return (ret > base) ? ret : base;
9744-}
9745diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9746index 9b436c2..54fbf0a 100644
9747--- a/arch/s390/mm/mmap.c
9748+++ b/arch/s390/mm/mmap.c
9749@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9750 */
9751 if (mmap_is_legacy()) {
9752 mm->mmap_base = mmap_base_legacy();
9753+
9754+#ifdef CONFIG_PAX_RANDMMAP
9755+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9756+ mm->mmap_base += mm->delta_mmap;
9757+#endif
9758+
9759 mm->get_unmapped_area = arch_get_unmapped_area;
9760 } else {
9761 mm->mmap_base = mmap_base();
9762+
9763+#ifdef CONFIG_PAX_RANDMMAP
9764+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9765+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9766+#endif
9767+
9768 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9769 }
9770 }
9771@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9772 */
9773 if (mmap_is_legacy()) {
9774 mm->mmap_base = mmap_base_legacy();
9775+
9776+#ifdef CONFIG_PAX_RANDMMAP
9777+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9778+ mm->mmap_base += mm->delta_mmap;
9779+#endif
9780+
9781 mm->get_unmapped_area = s390_get_unmapped_area;
9782 } else {
9783 mm->mmap_base = mmap_base();
9784+
9785+#ifdef CONFIG_PAX_RANDMMAP
9786+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9787+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9788+#endif
9789+
9790 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9791 }
9792 }
9793diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9794index ae3d59f..f65f075 100644
9795--- a/arch/score/include/asm/cache.h
9796+++ b/arch/score/include/asm/cache.h
9797@@ -1,7 +1,9 @@
9798 #ifndef _ASM_SCORE_CACHE_H
9799 #define _ASM_SCORE_CACHE_H
9800
9801+#include <linux/const.h>
9802+
9803 #define L1_CACHE_SHIFT 4
9804-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9805+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9806
9807 #endif /* _ASM_SCORE_CACHE_H */
9808diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9809index f9f3cd5..58ff438 100644
9810--- a/arch/score/include/asm/exec.h
9811+++ b/arch/score/include/asm/exec.h
9812@@ -1,6 +1,6 @@
9813 #ifndef _ASM_SCORE_EXEC_H
9814 #define _ASM_SCORE_EXEC_H
9815
9816-extern unsigned long arch_align_stack(unsigned long sp);
9817+#define arch_align_stack(x) (x)
9818
9819 #endif /* _ASM_SCORE_EXEC_H */
9820diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9821index a1519ad3..e8ac1ff 100644
9822--- a/arch/score/kernel/process.c
9823+++ b/arch/score/kernel/process.c
9824@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9825
9826 return task_pt_regs(task)->cp0_epc;
9827 }
9828-
9829-unsigned long arch_align_stack(unsigned long sp)
9830-{
9831- return sp;
9832-}
9833diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9834index ef9e555..331bd29 100644
9835--- a/arch/sh/include/asm/cache.h
9836+++ b/arch/sh/include/asm/cache.h
9837@@ -9,10 +9,11 @@
9838 #define __ASM_SH_CACHE_H
9839 #ifdef __KERNEL__
9840
9841+#include <linux/const.h>
9842 #include <linux/init.h>
9843 #include <cpu/cache.h>
9844
9845-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9846+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9847
9848 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9849
9850diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9851index 6777177..cb5e44f 100644
9852--- a/arch/sh/mm/mmap.c
9853+++ b/arch/sh/mm/mmap.c
9854@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9855 struct mm_struct *mm = current->mm;
9856 struct vm_area_struct *vma;
9857 int do_colour_align;
9858+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9859 struct vm_unmapped_area_info info;
9860
9861 if (flags & MAP_FIXED) {
9862@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9863 if (filp || (flags & MAP_SHARED))
9864 do_colour_align = 1;
9865
9866+#ifdef CONFIG_PAX_RANDMMAP
9867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9868+#endif
9869+
9870 if (addr) {
9871 if (do_colour_align)
9872 addr = COLOUR_ALIGN(addr, pgoff);
9873@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9874 addr = PAGE_ALIGN(addr);
9875
9876 vma = find_vma(mm, addr);
9877- if (TASK_SIZE - len >= addr &&
9878- (!vma || addr + len <= vma->vm_start))
9879+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9880 return addr;
9881 }
9882
9883 info.flags = 0;
9884 info.length = len;
9885- info.low_limit = TASK_UNMAPPED_BASE;
9886+ info.low_limit = mm->mmap_base;
9887 info.high_limit = TASK_SIZE;
9888 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9889 info.align_offset = pgoff << PAGE_SHIFT;
9890@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9891 struct mm_struct *mm = current->mm;
9892 unsigned long addr = addr0;
9893 int do_colour_align;
9894+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9895 struct vm_unmapped_area_info info;
9896
9897 if (flags & MAP_FIXED) {
9898@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9899 if (filp || (flags & MAP_SHARED))
9900 do_colour_align = 1;
9901
9902+#ifdef CONFIG_PAX_RANDMMAP
9903+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9904+#endif
9905+
9906 /* requesting a specific address */
9907 if (addr) {
9908 if (do_colour_align)
9909@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9910 addr = PAGE_ALIGN(addr);
9911
9912 vma = find_vma(mm, addr);
9913- if (TASK_SIZE - len >= addr &&
9914- (!vma || addr + len <= vma->vm_start))
9915+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9916 return addr;
9917 }
9918
9919@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9920 VM_BUG_ON(addr != -ENOMEM);
9921 info.flags = 0;
9922 info.low_limit = TASK_UNMAPPED_BASE;
9923+
9924+#ifdef CONFIG_PAX_RANDMMAP
9925+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9926+ info.low_limit += mm->delta_mmap;
9927+#endif
9928+
9929 info.high_limit = TASK_SIZE;
9930 addr = vm_unmapped_area(&info);
9931 }
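[Editor's note] One idiom in the hunks above deserves a note, because it looks like a bug at first sight: the #ifdef CONFIG_PAX_RANDMMAP block adds an if statement with no braces, so its body is the entire if (addr) { ... } block that follows it. The effect is that a randomized mm ignores the caller-supplied address hint and always searches from the randomized mm->mmap_base. Roughly, the preprocessed result is:

/* Effective control flow with CONFIG_PAX_RANDMMAP defined (sketch): */
if (!(mm->pax_flags & MF_PAX_RANDMMAP))	/* not randomizing... */
	if (addr) {			/* ...only then honour the hint */
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    check_heap_stack_gap(vma, addr, len, offset))
			return addr;
	}
/* otherwise fall through and search from mm->mmap_base */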
9932diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9933index 4082749..fd97781 100644
9934--- a/arch/sparc/include/asm/atomic_64.h
9935+++ b/arch/sparc/include/asm/atomic_64.h
9936@@ -15,18 +15,38 @@
9937 #define ATOMIC64_INIT(i) { (i) }
9938
9939 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9940+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9941+{
9942+ return ACCESS_ONCE(v->counter);
9943+}
9944 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9945+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9946+{
9947+ return ACCESS_ONCE(v->counter);
9948+}
9949
9950 #define atomic_set(v, i) (((v)->counter) = i)
9951+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9952+{
9953+ v->counter = i;
9954+}
9955 #define atomic64_set(v, i) (((v)->counter) = i)
9956+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9957+{
9958+ v->counter = i;
9959+}
9960
9961-#define ATOMIC_OP(op) \
9962-void atomic_##op(int, atomic_t *); \
9963-void atomic64_##op(long, atomic64_t *);
9964+#define __ATOMIC_OP(op, suffix) \
9965+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9966+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9967
9968-#define ATOMIC_OP_RETURN(op) \
9969-int atomic_##op##_return(int, atomic_t *); \
9970-long atomic64_##op##_return(long, atomic64_t *);
9971+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9972+
9973+#define __ATOMIC_OP_RETURN(op, suffix) \
9974+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9975+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9976+
9977+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9978
9979 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9980
9981@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9982
9983 #undef ATOMIC_OPS
9984 #undef ATOMIC_OP_RETURN
9985+#undef __ATOMIC_OP_RETURN
9986 #undef ATOMIC_OP
9987+#undef __ATOMIC_OP
9988
9989 #define atomic_dec_return(v) atomic_sub_return(1, v)
9990 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9991
9992 #define atomic_inc_return(v) atomic_add_return(1, v)
9993+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9994+{
9995+ return atomic_add_return_unchecked(1, v);
9996+}
9997 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9998+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9999+{
10000+ return atomic64_add_return_unchecked(1, v);
10001+}
10002
10003 /*
10004 * atomic_inc_and_test - increment and test
10005@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
10006 * other cases.
10007 */
10008 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10009+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10010+{
10011+ return atomic_inc_return_unchecked(v) == 0;
10012+}
10013 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10014
10015 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
10016@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
10017 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
10018
10019 #define atomic_inc(v) atomic_add(1, v)
10020+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10021+{
10022+ atomic_add_unchecked(1, v);
10023+}
10024 #define atomic64_inc(v) atomic64_add(1, v)
10025+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10026+{
10027+ atomic64_add_unchecked(1, v);
10028+}
10029
10030 #define atomic_dec(v) atomic_sub(1, v)
10031+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10032+{
10033+ atomic_sub_unchecked(1, v);
10034+}
10035 #define atomic64_dec(v) atomic64_sub(1, v)
10036+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10037+{
10038+ atomic64_sub_unchecked(1, v);
10039+}
10040
10041 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
10042 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
10043
10044 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10045+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10046+{
10047+ return cmpxchg(&v->counter, old, new);
10048+}
10049 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10050+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10051+{
10052+ return xchg(&v->counter, new);
10053+}
10054
10055 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10056 {
10057- int c, old;
10058+ int c, old, new;
10059 c = atomic_read(v);
10060 for (;;) {
10061- if (unlikely(c == (u)))
10062+ if (unlikely(c == u))
10063 break;
10064- old = atomic_cmpxchg((v), c, c + (a));
10065+
10066+ asm volatile("addcc %2, %0, %0\n"
10067+
10068+#ifdef CONFIG_PAX_REFCOUNT
10069+ "tvs %%icc, 6\n"
10070+#endif
10071+
10072+ : "=r" (new)
10073+ : "0" (c), "ir" (a)
10074+ : "cc");
10075+
10076+ old = atomic_cmpxchg(v, c, new);
10077 if (likely(old == c))
10078 break;
10079 c = old;
10080@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10081 #define atomic64_cmpxchg(v, o, n) \
10082 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10083 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10084+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10085+{
10086+ return xchg(&v->counter, new);
10087+}
10088
10089 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10090 {
10091- long c, old;
10092+ long c, old, new;
10093 c = atomic64_read(v);
10094 for (;;) {
10095- if (unlikely(c == (u)))
10096+ if (unlikely(c == u))
10097 break;
10098- old = atomic64_cmpxchg((v), c, c + (a));
10099+
10100+ asm volatile("addcc %2, %0, %0\n"
10101+
10102+#ifdef CONFIG_PAX_REFCOUNT
10103+ "tvs %%xcc, 6\n"
10104+#endif
10105+
10106+ : "=r" (new)
10107+ : "0" (c), "ir" (a)
10108+ : "cc");
10109+
10110+ old = atomic64_cmpxchg(v, c, new);
10111 if (likely(old == c))
10112 break;
10113 c = old;
10114 }
10115- return c != (u);
10116+ return c != u;
10117 }
10118
10119 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
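[Editor's note] The shape of this whole hunk is the PAX_REFCOUNT split: each atomic_* operation keeps its name but (via the assembly in arch/sparc/lib/atomic_64.S, patched further below) gains an overflow trap, while the new atomic_*_unchecked twins on atomic_unchecked_t keep the old free-wrapping semantics for counters that may legitimately overflow, such as the statistics counters converted elsewhere in this patch. The addcc / tvs %icc, 6 pair in __atomic_add_unless above is the sparc idiom: add while setting the condition codes, then trap to software trap 6 if the overflow flag is set. A conceptual userspace analogue of the checked path, assuming GCC/Clang's __builtin_add_overflow:

#include <stdio.h>
#include <stdlib.h>

/* Conceptual analogue only: the kernel does this in two instructions
 * (addcc sets the overflow flag, tvs %icc, 6 traps on it), and the trap
 * handler, not the caller, reports the overflow. */
static int checked_add(int *counter, int delta)
{
	int sum;

	if (__builtin_add_overflow(*counter, delta, &sum)) {
		fprintf(stderr, "refcount overflow, aborting\n");
		abort();	/* kernel: trap 6 -> pax_report_refcount_overflow() */
	}
	return *counter = sum;	/* not atomic; illustration only */
}

int main(void)
{
	int refs = 0x7fffffff;	/* INT_MAX: the next increment would wrap */
	checked_add(&refs, 1);	/* aborts instead of wrapping negative */
	return 0;
}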
10120diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10121index 7664894..45a974b 100644
10122--- a/arch/sparc/include/asm/barrier_64.h
10123+++ b/arch/sparc/include/asm/barrier_64.h
10124@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10125 do { \
10126 compiletime_assert_atomic_type(*p); \
10127 barrier(); \
10128- ACCESS_ONCE(*p) = (v); \
10129+ ACCESS_ONCE_RW(*p) = (v); \
10130 } while (0)
10131
10132 #define smp_load_acquire(p) \
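[Editor's note] ACCESS_ONCE_RW is not a mainline macro; it exists because grsecurity redefines ACCESS_ONCE() with a const-qualified view, so that accidental writes through it fail to compile, pairing with the constification work elsewhere in the patch. Assumed definitions (hedged; the actual hunk lives in include/linux/compiler.h, which this excerpt does not show):

/* Assumed grsecurity definitions: reads keep using ACCESS_ONCE, writes
 * must opt in explicitly via the _RW form. */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))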
10133diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10134index 5bb6991..5c2132e 100644
10135--- a/arch/sparc/include/asm/cache.h
10136+++ b/arch/sparc/include/asm/cache.h
10137@@ -7,10 +7,12 @@
10138 #ifndef _SPARC_CACHE_H
10139 #define _SPARC_CACHE_H
10140
10141+#include <linux/const.h>
10142+
10143 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10144
10145 #define L1_CACHE_SHIFT 5
10146-#define L1_CACHE_BYTES 32
10147+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10148
10149 #ifdef CONFIG_SPARC32
10150 #define SMP_CACHE_BYTES_SHIFT 5
10151diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10152index a24e41f..47677ff 100644
10153--- a/arch/sparc/include/asm/elf_32.h
10154+++ b/arch/sparc/include/asm/elf_32.h
10155@@ -114,6 +114,13 @@ typedef struct {
10156
10157 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10158
10159+#ifdef CONFIG_PAX_ASLR
10160+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10161+
10162+#define PAX_DELTA_MMAP_LEN 16
10163+#define PAX_DELTA_STACK_LEN 16
10164+#endif
10165+
10166 /* This yields a mask that user programs can use to figure out what
10167 instruction set this cpu supports. This can NOT be done in userspace
10168 on Sparc. */
10169diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10170index 370ca1e..d4f4a98 100644
10171--- a/arch/sparc/include/asm/elf_64.h
10172+++ b/arch/sparc/include/asm/elf_64.h
10173@@ -189,6 +189,13 @@ typedef struct {
10174 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10175 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10176
10177+#ifdef CONFIG_PAX_ASLR
10178+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10179+
10180+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10181+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10182+#endif
10183+
10184 extern unsigned long sparc64_elf_hwcap;
10185 #define ELF_HWCAP sparc64_elf_hwcap
10186
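[Editor's note] The PAX_DELTA_*_LEN values are bit counts, not byte offsets: they say how many random bits (in page units) go into mm->delta_mmap and mm->delta_stack, which the RANDMMAP hunks below fold into the mapping bases. A sketch of the usual PaX derivation in the ELF loader (assumption: the CONFIG_PAX_RANDMMAP code in fs/binfmt_elf.c, not part of this excerpt):

/* Hedged sketch of how the deltas are derived at execve time: */
mm->delta_mmap  = (pax_get_random_long() &
		   ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
mm->delta_stack = (pax_get_random_long() &
		   ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;

/* For a 64-bit sparc task: 28 random bits of 8 KiB pages spreads the
 * mmap base across 2^28 * 2^13 = 2 TiB; 32-bit tasks get 14 bits. */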
10187diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10188index a3890da..f6a408e 100644
10189--- a/arch/sparc/include/asm/pgalloc_32.h
10190+++ b/arch/sparc/include/asm/pgalloc_32.h
10191@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10192 }
10193
10194 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10195+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10196
10197 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10198 unsigned long address)
10199diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10200index 5e31871..13469c6 100644
10201--- a/arch/sparc/include/asm/pgalloc_64.h
10202+++ b/arch/sparc/include/asm/pgalloc_64.h
10203@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10204 }
10205
10206 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10207+#define pgd_populate_kernel(MM, PGD, PUD)	pgd_populate((MM), (PGD), (PUD))
10208
10209 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10210 {
10211@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10212 }
10213
10214 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10215+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10216
10217 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10218 {
10219diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10220index 59ba6f6..4518128 100644
10221--- a/arch/sparc/include/asm/pgtable.h
10222+++ b/arch/sparc/include/asm/pgtable.h
10223@@ -5,4 +5,8 @@
10224 #else
10225 #include <asm/pgtable_32.h>
10226 #endif
10227+
10228+#define ktla_ktva(addr) (addr)
10229+#define ktva_ktla(addr) (addr)
10230+
10231 #endif
10232diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10233index b9b91ae..950b91e 100644
10234--- a/arch/sparc/include/asm/pgtable_32.h
10235+++ b/arch/sparc/include/asm/pgtable_32.h
10236@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10237 #define PAGE_SHARED SRMMU_PAGE_SHARED
10238 #define PAGE_COPY SRMMU_PAGE_COPY
10239 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10240+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10241+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10242+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10243 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10244
10245 /* Top-level page directory - dummy used by init-mm.
10246@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10247
10248 /* xwr */
10249 #define __P000 PAGE_NONE
10250-#define __P001 PAGE_READONLY
10251-#define __P010 PAGE_COPY
10252-#define __P011 PAGE_COPY
10253+#define __P001 PAGE_READONLY_NOEXEC
10254+#define __P010 PAGE_COPY_NOEXEC
10255+#define __P011 PAGE_COPY_NOEXEC
10256 #define __P100 PAGE_READONLY
10257 #define __P101 PAGE_READONLY
10258 #define __P110 PAGE_COPY
10259 #define __P111 PAGE_COPY
10260
10261 #define __S000 PAGE_NONE
10262-#define __S001 PAGE_READONLY
10263-#define __S010 PAGE_SHARED
10264-#define __S011 PAGE_SHARED
10265+#define __S001 PAGE_READONLY_NOEXEC
10266+#define __S010 PAGE_SHARED_NOEXEC
10267+#define __S011 PAGE_SHARED_NOEXEC
10268 #define __S100 PAGE_READONLY
10269 #define __S101 PAGE_READONLY
10270 #define __S110 PAGE_SHARED
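[Editor's note] The __Pxxx/__Sxxx tables are indexed by the xwr permission bits a process requests via mmap/mprotect (P = private, S = shared). Re-pointing every combination that lacks the x bit at a *_NOEXEC protection is what gives PAGEEXEC semantics on srmmu: readable or writable data is no longer implicitly executable. The tables are consumed like this (sketch, condensed from mm/mmap.c):

/* Condensed from mm/mmap.c: the request bits index protection_map[],
 * which the architecture fills in from __P000..__S111. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]));
}

/* e.g. a PROT_READ|PROT_WRITE private mapping -> __P011, which now
 * resolves to SRMMU_PAGE_COPY_NOEXEC: same access, SRMMU_EXEC dropped. */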
10271diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10272index 79da178..c2eede8 100644
10273--- a/arch/sparc/include/asm/pgtsrmmu.h
10274+++ b/arch/sparc/include/asm/pgtsrmmu.h
10275@@ -115,6 +115,11 @@
10276 SRMMU_EXEC | SRMMU_REF)
10277 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10278 SRMMU_EXEC | SRMMU_REF)
10279+
10280+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10281+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10282+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10283+
10284 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10285 SRMMU_DIRTY | SRMMU_REF)
10286
10287diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10288index 29d64b1..4272fe8 100644
10289--- a/arch/sparc/include/asm/setup.h
10290+++ b/arch/sparc/include/asm/setup.h
10291@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10292 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10293
10294 /* init_64.c */
10295-extern atomic_t dcpage_flushes;
10296-extern atomic_t dcpage_flushes_xcall;
10297+extern atomic_unchecked_t dcpage_flushes;
10298+extern atomic_unchecked_t dcpage_flushes_xcall;
10299
10300 extern int sysctl_tsb_ratio;
10301 #endif
10302diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10303index 9689176..63c18ea 100644
10304--- a/arch/sparc/include/asm/spinlock_64.h
10305+++ b/arch/sparc/include/asm/spinlock_64.h
10306@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10307
10308 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10309
10310-static void inline arch_read_lock(arch_rwlock_t *lock)
10311+static inline void arch_read_lock(arch_rwlock_t *lock)
10312 {
10313 unsigned long tmp1, tmp2;
10314
10315 __asm__ __volatile__ (
10316 "1: ldsw [%2], %0\n"
10317 " brlz,pn %0, 2f\n"
10318-"4: add %0, 1, %1\n"
10319+"4: addcc %0, 1, %1\n"
10320+
10321+#ifdef CONFIG_PAX_REFCOUNT
10322+" tvs %%icc, 6\n"
10323+#endif
10324+
10325 " cas [%2], %0, %1\n"
10326 " cmp %0, %1\n"
10327 " bne,pn %%icc, 1b\n"
10328@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10329 " .previous"
10330 : "=&r" (tmp1), "=&r" (tmp2)
10331 : "r" (lock)
10332- : "memory");
10333+ : "memory", "cc");
10334 }
10335
10336-static int inline arch_read_trylock(arch_rwlock_t *lock)
10337+static inline int arch_read_trylock(arch_rwlock_t *lock)
10338 {
10339 int tmp1, tmp2;
10340
10341@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10342 "1: ldsw [%2], %0\n"
10343 " brlz,a,pn %0, 2f\n"
10344 " mov 0, %0\n"
10345-" add %0, 1, %1\n"
10346+" addcc %0, 1, %1\n"
10347+
10348+#ifdef CONFIG_PAX_REFCOUNT
10349+" tvs %%icc, 6\n"
10350+#endif
10351+
10352 " cas [%2], %0, %1\n"
10353 " cmp %0, %1\n"
10354 " bne,pn %%icc, 1b\n"
10355@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10356 return tmp1;
10357 }
10358
10359-static void inline arch_read_unlock(arch_rwlock_t *lock)
10360+static inline void arch_read_unlock(arch_rwlock_t *lock)
10361 {
10362 unsigned long tmp1, tmp2;
10363
10364 __asm__ __volatile__(
10365 "1: lduw [%2], %0\n"
10366-" sub %0, 1, %1\n"
10367+" subcc %0, 1, %1\n"
10368+
10369+#ifdef CONFIG_PAX_REFCOUNT
10370+" tvs %%icc, 6\n"
10371+#endif
10372+
10373 " cas [%2], %0, %1\n"
10374 " cmp %0, %1\n"
10375 " bne,pn %%xcc, 1b\n"
10376@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10377 : "memory");
10378 }
10379
10380-static void inline arch_write_lock(arch_rwlock_t *lock)
10381+static inline void arch_write_lock(arch_rwlock_t *lock)
10382 {
10383 unsigned long mask, tmp1, tmp2;
10384
10385@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10386 : "memory");
10387 }
10388
10389-static void inline arch_write_unlock(arch_rwlock_t *lock)
10390+static inline void arch_write_unlock(arch_rwlock_t *lock)
10391 {
10392 __asm__ __volatile__(
10393 " stw %%g0, [%0]"
10394@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10395 : "memory");
10396 }
10397
10398-static int inline arch_write_trylock(arch_rwlock_t *lock)
10399+static inline int arch_write_trylock(arch_rwlock_t *lock)
10400 {
10401 unsigned long mask, tmp1, tmp2, result;
10402
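[Editor's note] Two mechanical details in the rwlock hunks are easy to miss. First, the reader-count updates switch from add/sub to addcc/subcc plus tvs %icc, 6, extending the refcount trap to the lock's reader counter. Second, because the cc-suffixed forms write the condition codes, the asm's clobber list gains "cc"; without it GCC may assume the flags survive across the asm. A minimal illustration of the constraint change (sparc-only sketch):

/* Why the "cc" clobber is needed once addcc is used: */
static inline long inc_with_flags(long v)
{
	long out;

	__asm__ __volatile__("addcc %1, 1, %0"
			     : "=r" (out)
			     : "r" (v)
			     : "cc");	/* addcc clobbers %icc/%xcc */
	return out;
}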
10403diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10404index 025c984..a216504 100644
10405--- a/arch/sparc/include/asm/thread_info_32.h
10406+++ b/arch/sparc/include/asm/thread_info_32.h
10407@@ -49,6 +49,8 @@ struct thread_info {
10408 unsigned long w_saved;
10409
10410 struct restart_block restart_block;
10411+
10412+ unsigned long lowest_stack;
10413 };
10414
10415 /*
10416diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10417index 798f027..b009941 100644
10418--- a/arch/sparc/include/asm/thread_info_64.h
10419+++ b/arch/sparc/include/asm/thread_info_64.h
10420@@ -63,6 +63,8 @@ struct thread_info {
10421 struct pt_regs *kern_una_regs;
10422 unsigned int kern_una_insn;
10423
10424+ unsigned long lowest_stack;
10425+
10426 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10427 __attribute__ ((aligned(64)));
10428 };
10429@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10430 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10431 /* flag bit 4 is available */
10432 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10433-/* flag bit 6 is available */
10434+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10435 #define TIF_32BIT 7 /* 32-bit binary */
10436 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10437 #define TIF_SECCOMP 9 /* secure computing */
10438 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10439 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10440+
10441 /* NOTE: Thread flags >= 12 should be ones we have no interest
10442 * in using in assembly, else we can't use the mask as
10443 * an immediate value in instructions such as andcc.
10444@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10445 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10446 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10447 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10448+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10449
10450 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10451 _TIF_DO_NOTIFY_RESUME_MASK | \
10452 _TIF_NEED_RESCHED)
10453 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10454
10455+#define _TIF_WORK_SYSCALL \
10456+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10457+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10458+
10459 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10460
10461 /*
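[Editor's note] Folding the syscall-work flags into _TIF_WORK_SYSCALL keeps the assembly tests in arch/sparc/kernel/syscalls.S (patched below) in sync now that TIF_GRSEC_SETXID joins the set. The header's warning about flag bits >= 12 is about sparc's 13-bit sign-extended immediates: andcc can only encode constants up to 4095 directly. A quick check with the bit numbers defined above (assuming TIF_SYSCALL_TRACE is bit 0, as in the unpatched header):

#include <assert.h>

int main(void)
{
	/* TRACE=0, GRSEC_SETXID=6, NOHZ=8, SECCOMP=9, AUDIT=10,
	 * SYSCALL_TRACEPOINT=11 (bit values per this header). */
	unsigned int mask = (1u << 0) | (1u << 6) | (1u << 8) |
			    (1u << 9) | (1u << 10) | (1u << 11);

	assert(mask == 0xf41);
	assert(mask <= 4095);	/* fits andcc's 13-bit signed immediate */
	return 0;
}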
10462diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10463index bd56c28..4b63d83 100644
10464--- a/arch/sparc/include/asm/uaccess.h
10465+++ b/arch/sparc/include/asm/uaccess.h
10466@@ -1,5 +1,6 @@
10467 #ifndef ___ASM_SPARC_UACCESS_H
10468 #define ___ASM_SPARC_UACCESS_H
10469+
10470 #if defined(__sparc__) && defined(__arch64__)
10471 #include <asm/uaccess_64.h>
10472 #else
10473diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10474index 9634d08..020b2dc 100644
10475--- a/arch/sparc/include/asm/uaccess_32.h
10476+++ b/arch/sparc/include/asm/uaccess_32.h
10477@@ -47,6 +47,7 @@
10478 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10479 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10480 #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
10481+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10482 #define access_ok(type, addr, size) \
10483 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10484
10485@@ -250,27 +251,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10486
10487 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10488 {
10489- if (n && __access_ok((unsigned long) to, n))
10490+ if ((long)n < 0)
10491+ return n;
10492+
10493+ if (n && __access_ok((unsigned long) to, n)) {
10494+ if (!__builtin_constant_p(n))
10495+ check_object_size(from, n, true);
10496 return __copy_user(to, (__force void __user *) from, n);
10497- else
10498+ } else
10499 return n;
10500 }
10501
10502 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10503 {
10504+ if ((long)n < 0)
10505+ return n;
10506+
10507+ if (!__builtin_constant_p(n))
10508+ check_object_size(from, n, true);
10509+
10510 return __copy_user(to, (__force void __user *) from, n);
10511 }
10512
10513 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10514 {
10515- if (n && __access_ok((unsigned long) from, n))
10516+ if ((long)n < 0)
10517+ return n;
10518+
10519+ if (n && __access_ok((unsigned long) from, n)) {
10520+ if (!__builtin_constant_p(n))
10521+ check_object_size(to, n, false);
10522 return __copy_user((__force void __user *) to, from, n);
10523- else
10524+ } else
10525 return n;
10526 }
10527
10528 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10529 {
10530+ if ((long)n < 0)
10531+ return n;
10532+
10533 return __copy_user((__force void __user *) to, from, n);
10534 }
10535
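[Editor's note] The added (long)n < 0 test targets a classic bug class: a negative int length, implicitly converted to the unsigned n, becomes an enormous value that would march __copy_user far past the destination. Rejecting any size with the top bit set costs one compare and removes the exploit primitive; check_object_size() is the PAX_USERCOPY hook that additionally bounds non-constant sizes against the kernel object being copied into or out of. (The 64-bit version in the next file also caps sizes at INT_MAX.) The pattern being defended against, sketched with hypothetical names (read_len_from_user() and uptr are made up for illustration):

/* Hypothetical buggy handler: */
char buf[64];
int len = read_len_from_user();		/* attacker supplies -1 */

/* As the size_t argument, -1 becomes 0xffffffff.  The hardened
 * copy_from_user() sees (long)n < 0 and returns n untouched ("nothing
 * copied") instead of letting __copy_user overrun buf by ~4 GiB. */
if (copy_from_user(buf, uptr, len))
	return -EFAULT;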
10536diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10537index c990a5e..7384856 100644
10538--- a/arch/sparc/include/asm/uaccess_64.h
10539+++ b/arch/sparc/include/asm/uaccess_64.h
10540@@ -10,6 +10,7 @@
10541 #include <linux/compiler.h>
10542 #include <linux/string.h>
10543 #include <linux/thread_info.h>
10544+#include <linux/kernel.h>
10545 #include <asm/asi.h>
10546 #include <asm/spitfire.h>
10547 #include <asm-generic/uaccess-unaligned.h>
10548@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10549 return 1;
10550 }
10551
10552+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10553+{
10554+ return 1;
10555+}
10556+
10557 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10558 {
10559 return 1;
10560@@ -214,8 +220,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10561 static inline unsigned long __must_check
10562 copy_from_user(void *to, const void __user *from, unsigned long size)
10563 {
10564- unsigned long ret = ___copy_from_user(to, from, size);
10565+ unsigned long ret;
10566
10567+ if ((long)size < 0 || size > INT_MAX)
10568+ return size;
10569+
10570+ if (!__builtin_constant_p(size))
10571+ check_object_size(to, size, false);
10572+
10573+ ret = ___copy_from_user(to, from, size);
10574 if (unlikely(ret))
10575 ret = copy_from_user_fixup(to, from, size);
10576
10577@@ -231,8 +244,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10578 static inline unsigned long __must_check
10579 copy_to_user(void __user *to, const void *from, unsigned long size)
10580 {
10581- unsigned long ret = ___copy_to_user(to, from, size);
10582+ unsigned long ret;
10583
10584+ if ((long)size < 0 || size > INT_MAX)
10585+ return size;
10586+
10587+ if (!__builtin_constant_p(size))
10588+ check_object_size(from, size, true);
10589+
10590+ ret = ___copy_to_user(to, from, size);
10591 if (unlikely(ret))
10592 ret = copy_to_user_fixup(to, from, size);
10593 return ret;
10594diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10595index 7cf9c6e..6206648 100644
10596--- a/arch/sparc/kernel/Makefile
10597+++ b/arch/sparc/kernel/Makefile
10598@@ -4,7 +4,7 @@
10599 #
10600
10601 asflags-y := -ansi
10602-ccflags-y := -Werror
10603+#ccflags-y := -Werror
10604
10605 extra-y := head_$(BITS).o
10606
10607diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10608index 50e7b62..79fae35 100644
10609--- a/arch/sparc/kernel/process_32.c
10610+++ b/arch/sparc/kernel/process_32.c
10611@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10612
10613 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10614 r->psr, r->pc, r->npc, r->y, print_tainted());
10615- printk("PC: <%pS>\n", (void *) r->pc);
10616+ printk("PC: <%pA>\n", (void *) r->pc);
10617 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10618 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10619 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10620 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10621 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10622 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10623- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10624+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10625
10626 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10627 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10628@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10629 rw = (struct reg_window32 *) fp;
10630 pc = rw->ins[7];
10631 printk("[%08lx : ", pc);
10632- printk("%pS ] ", (void *) pc);
10633+ printk("%pA ] ", (void *) pc);
10634 fp = rw->ins[6];
10635 } while (++count < 16);
10636 printk("\n");
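[Editor's note] The blanket %pS -> %pA conversion in the register and stack dumpers relies on a printk extension this patch adds in lib/vsprintf.c (hunk not in this excerpt). Hedged summary: %pA resolves a kernel text address to a symbol like %pS does, but it cooperates with GRKERNSEC_HIDESYM so oops output stops being a convenient kernel-layout oracle for unprivileged readers of dmesg. Usage is unchanged apart from the specifier:

/* Same call shape as before, only the format specifier differs: */
printk("PC: <%pA>\n", (void *)regs->pc);	/* hidesym-aware symbol dump */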
10637diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10638index 46a5964..a35c62c 100644
10639--- a/arch/sparc/kernel/process_64.c
10640+++ b/arch/sparc/kernel/process_64.c
10641@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10642 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10643 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10644 if (regs->tstate & TSTATE_PRIV)
10645- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10646+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10647 }
10648
10649 void show_regs(struct pt_regs *regs)
10650@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10651
10652 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10653 regs->tpc, regs->tnpc, regs->y, print_tainted());
10654- printk("TPC: <%pS>\n", (void *) regs->tpc);
10655+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10656 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10657 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10658 regs->u_regs[3]);
10659@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10660 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10661 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10662 regs->u_regs[15]);
10663- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10664+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10665 show_regwindow(regs);
10666 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10667 }
10668@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10669 ((tp && tp->task) ? tp->task->pid : -1));
10670
10671 if (gp->tstate & TSTATE_PRIV) {
10672- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10673+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10674 (void *) gp->tpc,
10675 (void *) gp->o7,
10676 (void *) gp->i7,
10677diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10678index 79cc0d1..ec62734 100644
10679--- a/arch/sparc/kernel/prom_common.c
10680+++ b/arch/sparc/kernel/prom_common.c
10681@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10682
10683 unsigned int prom_early_allocated __initdata;
10684
10685-static struct of_pdt_ops prom_sparc_ops __initdata = {
10686+static struct of_pdt_ops prom_sparc_ops __initconst = {
10687 .nextprop = prom_common_nextprop,
10688 .getproplen = prom_getproplen,
10689 .getproperty = prom_getproperty,
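[Editor's note] Moving prom_sparc_ops from __initdata to __initconst assumes the structure really is read-only. Under grsecurity that holds even without a spelled-out const: the constify gcc plugin (enabled elsewhere in this patch) automatically const-qualifies ops-style structures whose members are all function pointers, so the object belongs in the read-only init section. The equivalent explicit form would be (hedged sketch):

/* What the constify plugin effectively produces: */
static const struct of_pdt_ops prom_sparc_ops __initconst = {
	.nextprop	= prom_common_nextprop,
	.getproplen	= prom_getproplen,
	.getproperty	= prom_getproperty,
	/* remaining callbacks as in the original initializer */
};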
10690diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10691index 9ddc492..27a5619 100644
10692--- a/arch/sparc/kernel/ptrace_64.c
10693+++ b/arch/sparc/kernel/ptrace_64.c
10694@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10695 return ret;
10696 }
10697
10698+#ifdef CONFIG_GRKERNSEC_SETXID
10699+extern void gr_delayed_cred_worker(void);
10700+#endif
10701+
10702 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10703 {
10704 int ret = 0;
10705@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10706 if (test_thread_flag(TIF_NOHZ))
10707 user_exit();
10708
10709+#ifdef CONFIG_GRKERNSEC_SETXID
10710+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10711+ gr_delayed_cred_worker();
10712+#endif
10713+
10714 if (test_thread_flag(TIF_SYSCALL_TRACE))
10715 ret = tracehook_report_syscall_entry(regs);
10716
10717@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10718 if (test_thread_flag(TIF_NOHZ))
10719 user_exit();
10720
10721+#ifdef CONFIG_GRKERNSEC_SETXID
10722+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10723+ gr_delayed_cred_worker();
10724+#endif
10725+
10726 audit_syscall_exit(regs);
10727
10728 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10729diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10730index da6f1a7..e5dea8f 100644
10731--- a/arch/sparc/kernel/smp_64.c
10732+++ b/arch/sparc/kernel/smp_64.c
10733@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10734 return;
10735
10736 #ifdef CONFIG_DEBUG_DCFLUSH
10737- atomic_inc(&dcpage_flushes);
10738+ atomic_inc_unchecked(&dcpage_flushes);
10739 #endif
10740
10741 this_cpu = get_cpu();
10742@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10743 xcall_deliver(data0, __pa(pg_addr),
10744 (u64) pg_addr, cpumask_of(cpu));
10745 #ifdef CONFIG_DEBUG_DCFLUSH
10746- atomic_inc(&dcpage_flushes_xcall);
10747+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10748 #endif
10749 }
10750 }
10751@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10752 preempt_disable();
10753
10754 #ifdef CONFIG_DEBUG_DCFLUSH
10755- atomic_inc(&dcpage_flushes);
10756+ atomic_inc_unchecked(&dcpage_flushes);
10757 #endif
10758 data0 = 0;
10759 pg_addr = page_address(page);
10760@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10761 xcall_deliver(data0, __pa(pg_addr),
10762 (u64) pg_addr, cpu_online_mask);
10763 #ifdef CONFIG_DEBUG_DCFLUSH
10764- atomic_inc(&dcpage_flushes_xcall);
10765+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10766 #endif
10767 }
10768 __local_flush_dcache_page(page);
10769diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10770index 646988d..b88905f 100644
10771--- a/arch/sparc/kernel/sys_sparc_32.c
10772+++ b/arch/sparc/kernel/sys_sparc_32.c
10773@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10774 if (len > TASK_SIZE - PAGE_SIZE)
10775 return -ENOMEM;
10776 if (!addr)
10777- addr = TASK_UNMAPPED_BASE;
10778+ addr = current->mm->mmap_base;
10779
10780 info.flags = 0;
10781 info.length = len;
10782diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10783index 30e7ddb..266a3b0 100644
10784--- a/arch/sparc/kernel/sys_sparc_64.c
10785+++ b/arch/sparc/kernel/sys_sparc_64.c
10786@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10787 struct vm_area_struct * vma;
10788 unsigned long task_size = TASK_SIZE;
10789 int do_color_align;
10790+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10791 struct vm_unmapped_area_info info;
10792
10793 if (flags & MAP_FIXED) {
10794 /* We do not accept a shared mapping if it would violate
10795 * cache aliasing constraints.
10796 */
10797- if ((flags & MAP_SHARED) &&
10798+ if ((filp || (flags & MAP_SHARED)) &&
10799 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10800 return -EINVAL;
10801 return addr;
10802@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10803 if (filp || (flags & MAP_SHARED))
10804 do_color_align = 1;
10805
10806+#ifdef CONFIG_PAX_RANDMMAP
10807+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10808+#endif
10809+
10810 if (addr) {
10811 if (do_color_align)
10812 addr = COLOR_ALIGN(addr, pgoff);
10813@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10814 addr = PAGE_ALIGN(addr);
10815
10816 vma = find_vma(mm, addr);
10817- if (task_size - len >= addr &&
10818- (!vma || addr + len <= vma->vm_start))
10819+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10820 return addr;
10821 }
10822
10823 info.flags = 0;
10824 info.length = len;
10825- info.low_limit = TASK_UNMAPPED_BASE;
10826+ info.low_limit = mm->mmap_base;
10827 info.high_limit = min(task_size, VA_EXCLUDE_START);
10828 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10829 info.align_offset = pgoff << PAGE_SHIFT;
10830+ info.threadstack_offset = offset;
10831 addr = vm_unmapped_area(&info);
10832
10833 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10834 VM_BUG_ON(addr != -ENOMEM);
10835 info.low_limit = VA_EXCLUDE_END;
10836+
10837+#ifdef CONFIG_PAX_RANDMMAP
10838+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10839+ info.low_limit += mm->delta_mmap;
10840+#endif
10841+
10842 info.high_limit = task_size;
10843 addr = vm_unmapped_area(&info);
10844 }
10845@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10846 unsigned long task_size = STACK_TOP32;
10847 unsigned long addr = addr0;
10848 int do_color_align;
10849+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10850 struct vm_unmapped_area_info info;
10851
10852 /* This should only ever run for 32-bit processes. */
10853@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10854 /* We do not accept a shared mapping if it would violate
10855 * cache aliasing constraints.
10856 */
10857- if ((flags & MAP_SHARED) &&
10858+ if ((filp || (flags & MAP_SHARED)) &&
10859 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10860 return -EINVAL;
10861 return addr;
10862@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10863 if (filp || (flags & MAP_SHARED))
10864 do_color_align = 1;
10865
10866+#ifdef CONFIG_PAX_RANDMMAP
10867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10868+#endif
10869+
10870 /* requesting a specific address */
10871 if (addr) {
10872 if (do_color_align)
10873@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10874 addr = PAGE_ALIGN(addr);
10875
10876 vma = find_vma(mm, addr);
10877- if (task_size - len >= addr &&
10878- (!vma || addr + len <= vma->vm_start))
10879+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10880 return addr;
10881 }
10882
10883@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10884 info.high_limit = mm->mmap_base;
10885 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10886 info.align_offset = pgoff << PAGE_SHIFT;
10887+ info.threadstack_offset = offset;
10888 addr = vm_unmapped_area(&info);
10889
10890 /*
10891@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10892 VM_BUG_ON(addr != -ENOMEM);
10893 info.flags = 0;
10894 info.low_limit = TASK_UNMAPPED_BASE;
10895+
10896+#ifdef CONFIG_PAX_RANDMMAP
10897+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10898+ info.low_limit += mm->delta_mmap;
10899+#endif
10900+
10901 info.high_limit = STACK_TOP32;
10902 addr = vm_unmapped_area(&info);
10903 }
10904@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10905 EXPORT_SYMBOL(get_fb_unmapped_area);
10906
10907 /* Essentially the same as PowerPC. */
10908-static unsigned long mmap_rnd(void)
10909+static unsigned long mmap_rnd(struct mm_struct *mm)
10910 {
10911 unsigned long rnd = 0UL;
10912
10913+#ifdef CONFIG_PAX_RANDMMAP
10914+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10915+#endif
10916+
10917 if (current->flags & PF_RANDOMIZE) {
10918 unsigned long val = get_random_int();
10919 if (test_thread_flag(TIF_32BIT))
10920@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10921
10922 void arch_pick_mmap_layout(struct mm_struct *mm)
10923 {
10924- unsigned long random_factor = mmap_rnd();
10925+ unsigned long random_factor = mmap_rnd(mm);
10926 unsigned long gap;
10927
10928 /*
10929@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10930 gap == RLIM_INFINITY ||
10931 sysctl_legacy_va_layout) {
10932 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10933+
10934+#ifdef CONFIG_PAX_RANDMMAP
10935+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10936+ mm->mmap_base += mm->delta_mmap;
10937+#endif
10938+
10939 mm->get_unmapped_area = arch_get_unmapped_area;
10940 } else {
10941 /* We know it's 32-bit */
10942@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10943 gap = (task_size / 6 * 5);
10944
10945 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10946+
10947+#ifdef CONFIG_PAX_RANDMMAP
10948+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10949+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10950+#endif
10951+
10952 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10953 }
10954 }
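[Editor's note] Summarizing the layout changes in this file: mmap_rnd() now skips the legacy PF_RANDOMIZE factor whenever PaX owns the randomization, and arch_pick_mmap_layout() then applies the PaX deltas directly. Legacy bottom-up layouts push mmap_base up by delta_mmap; top-down layouts pull it down by delta_mmap + delta_stack so both the mmap and stack gaps stay randomized. Condensed into one function (a sketch using the names from the patch):

static unsigned long pick_mmap_base(struct mm_struct *mm, int legacy,
				    unsigned long task_size,
				    unsigned long gap,
				    unsigned long random_factor)
{
	unsigned long base;

	if (legacy) {
		base = TASK_UNMAPPED_BASE + random_factor;
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			base += mm->delta_mmap;			/* grows up */
	} else {
		base = PAGE_ALIGN(task_size - gap - random_factor);
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			base -= mm->delta_mmap + mm->delta_stack; /* grows down */
	}
	return base;
}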
10955diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10956index bb00089..e0ea580 100644
10957--- a/arch/sparc/kernel/syscalls.S
10958+++ b/arch/sparc/kernel/syscalls.S
10959@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10960 #endif
10961 .align 32
10962 1: ldx [%g6 + TI_FLAGS], %l5
10963- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10964+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10965 be,pt %icc, rtrap
10966 nop
10967 call syscall_trace_leave
10968@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10969
10970 srl %i3, 0, %o3 ! IEU0
10971 srl %i2, 0, %o2 ! IEU0 Group
10972- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10973+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10974 bne,pn %icc, linux_syscall_trace32 ! CTI
10975 mov %i0, %l5 ! IEU1
10976 5: call %l7 ! CTI Group brk forced
10977@@ -218,7 +218,7 @@ linux_sparc_syscall:
10978
10979 mov %i3, %o3 ! IEU1
10980 mov %i4, %o4 ! IEU0 Group
10981- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10982+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10983 bne,pn %icc, linux_syscall_trace ! CTI Group
10984 mov %i0, %l5 ! IEU0
10985 2: call %l7 ! CTI Group brk forced
10986@@ -233,7 +233,7 @@ ret_sys_call:
10987
10988 cmp %o0, -ERESTART_RESTARTBLOCK
10989 bgeu,pn %xcc, 1f
10990- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10991+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10992 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10993
10994 2:
10995diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10996index 6fd386c5..6907d81 100644
10997--- a/arch/sparc/kernel/traps_32.c
10998+++ b/arch/sparc/kernel/traps_32.c
10999@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11000 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11001 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11002
11003+extern void gr_handle_kernel_exploit(void);
11004+
11005 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11006 {
11007 static int die_counter;
11008@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11009 count++ < 30 &&
11010 (((unsigned long) rw) >= PAGE_OFFSET) &&
11011 !(((unsigned long) rw) & 0x7)) {
11012- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11013+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11014 (void *) rw->ins[7]);
11015 rw = (struct reg_window32 *)rw->ins[6];
11016 }
11017 }
11018 printk("Instruction DUMP:");
11019 instruction_dump ((unsigned long *) regs->pc);
11020- if(regs->psr & PSR_PS)
11021+ if(regs->psr & PSR_PS) {
11022+ gr_handle_kernel_exploit();
11023 do_exit(SIGKILL);
11024+ }
11025 do_exit(SIGSEGV);
11026 }
11027
11028diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11029index 981a769..d906eda 100644
11030--- a/arch/sparc/kernel/traps_64.c
11031+++ b/arch/sparc/kernel/traps_64.c
11032@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11033 i + 1,
11034 p->trapstack[i].tstate, p->trapstack[i].tpc,
11035 p->trapstack[i].tnpc, p->trapstack[i].tt);
11036- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11037+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11038 }
11039 }
11040
11041@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11042
11043 lvl -= 0x100;
11044 if (regs->tstate & TSTATE_PRIV) {
11045+
11046+#ifdef CONFIG_PAX_REFCOUNT
11047+ if (lvl == 6)
11048+ pax_report_refcount_overflow(regs);
11049+#endif
11050+
11051 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11052 die_if_kernel(buffer, regs);
11053 }
11054@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11055 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11056 {
11057 char buffer[32];
11058-
11059+
11060 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11061 0, lvl, SIGTRAP) == NOTIFY_STOP)
11062 return;
11063
11064+#ifdef CONFIG_PAX_REFCOUNT
11065+ if (lvl == 6)
11066+ pax_report_refcount_overflow(regs);
11067+#endif
11068+
11069 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11070
11071 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11072@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11073 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11074 printk("%s" "ERROR(%d): ",
11075 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11076- printk("TPC<%pS>\n", (void *) regs->tpc);
11077+ printk("TPC<%pA>\n", (void *) regs->tpc);
11078 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11079 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11080 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11081@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11082 smp_processor_id(),
11083 (type & 0x1) ? 'I' : 'D',
11084 regs->tpc);
11085- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11086+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11087 panic("Irrecoverable Cheetah+ parity error.");
11088 }
11089
11090@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11091 smp_processor_id(),
11092 (type & 0x1) ? 'I' : 'D',
11093 regs->tpc);
11094- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11095+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11096 }
11097
11098 struct sun4v_error_entry {
11099@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11100 /*0x38*/u64 reserved_5;
11101 };
11102
11103-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11104-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11105+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11106+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11107
11108 static const char *sun4v_err_type_to_str(u8 type)
11109 {
11110@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11111 }
11112
11113 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11114- int cpu, const char *pfx, atomic_t *ocnt)
11115+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11116 {
11117 u64 *raw_ptr = (u64 *) ent;
11118 u32 attrs;
11119@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11120
11121 show_regs(regs);
11122
11123- if ((cnt = atomic_read(ocnt)) != 0) {
11124- atomic_set(ocnt, 0);
11125+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11126+ atomic_set_unchecked(ocnt, 0);
11127 wmb();
11128 printk("%s: Queue overflowed %d times.\n",
11129 pfx, cnt);
11130@@ -2048,7 +2059,7 @@ out:
11131 */
11132 void sun4v_resum_overflow(struct pt_regs *regs)
11133 {
11134- atomic_inc(&sun4v_resum_oflow_cnt);
11135+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11136 }
11137
11138 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11139@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11140 /* XXX Actually even this can make not that much sense. Perhaps
11141 * XXX we should just pull the plug and panic directly from here?
11142 */
11143- atomic_inc(&sun4v_nonresum_oflow_cnt);
11144+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11145 }
11146
11147 static void sun4v_tlb_error(struct pt_regs *regs)
11148@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11149
11150 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11151 regs->tpc, tl);
11152- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11153+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11154 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11155- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11156+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11157 (void *) regs->u_regs[UREG_I7]);
11158 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11159 "pte[%lx] error[%lx]\n",
11160@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11161
11162 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11163 regs->tpc, tl);
11164- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11165+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11166 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11167- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11168+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11169 (void *) regs->u_regs[UREG_I7]);
11170 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11171 "pte[%lx] error[%lx]\n",
11172@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11173 fp = (unsigned long)sf->fp + STACK_BIAS;
11174 }
11175
11176- printk(" [%016lx] %pS\n", pc, (void *) pc);
11177+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11178 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11179 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11180 int index = tsk->curr_ret_stack;
11181 if (tsk->ret_stack && index >= graph) {
11182 pc = tsk->ret_stack[index - graph].ret;
11183- printk(" [%016lx] %pS\n", pc, (void *) pc);
11184+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11185 graph++;
11186 }
11187 }
11188@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11189 return (struct reg_window *) (fp + STACK_BIAS);
11190 }
11191
11192+extern void gr_handle_kernel_exploit(void);
11193+
11194 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11195 {
11196 static int die_counter;
11197@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11198 while (rw &&
11199 count++ < 30 &&
11200 kstack_valid(tp, (unsigned long) rw)) {
11201- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11202+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11203 (void *) rw->ins[7]);
11204
11205 rw = kernel_stack_up(rw);
11206@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11207 }
11208 user_instruction_dump ((unsigned int __user *) regs->tpc);
11209 }
11210- if (regs->tstate & TSTATE_PRIV)
11211+ if (regs->tstate & TSTATE_PRIV) {
11212+ gr_handle_kernel_exploit();
11213 do_exit(SIGKILL);
11214+ }
11215 do_exit(SIGSEGV);
11216 }
11217 EXPORT_SYMBOL(die_if_kernel);
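[Editor's note] This file is where every tvs %icc, 6 / tvs %xcc, 6 planted by the patch finally lands. Sparc Tcc instructions with software trap number n arrive as trap type 0x100 + n, and bad_trap() subtracts the 0x100 before comparing, hence the lvl == 6 checks feeding pax_report_refcount_overflow(). The second hook, gr_handle_kernel_exploit() in die_if_kernel(), runs only for faults raised in privileged state, letting grsecurity punish the user that provoked a kernel oops. The overall path, as a comment-level sketch:

/* Refcount-overflow path on sparc64 (flow wired up above):
 *
 *   addcc %g1, %o0, %g7     overflow sets the %icc/%xcc V flag
 *   tvs   %icc, 6           trap-on-overflow, software trap 6
 *        -> trap type 0x106 -> bad_trap(regs, 0x106)
 *        -> lvl = 0x106 - 0x100 = 6
 *        -> TSTATE_PRIV set: pax_report_refcount_overflow(regs)
 *        -> die_if_kernel() -> gr_handle_kernel_exploit() -> do_exit()
 */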
11218diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11219index 62098a8..547ab2c 100644
11220--- a/arch/sparc/kernel/unaligned_64.c
11221+++ b/arch/sparc/kernel/unaligned_64.c
11222@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11223 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11224
11225 if (__ratelimit(&ratelimit)) {
11226- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11227+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11228 regs->tpc, (void *) regs->tpc);
11229 }
11230 }
11231diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11232index 3269b02..64f5231 100644
11233--- a/arch/sparc/lib/Makefile
11234+++ b/arch/sparc/lib/Makefile
11235@@ -2,7 +2,7 @@
11236 #
11237
11238 asflags-y := -ansi -DST_DIV0=0x02
11239-ccflags-y := -Werror
11240+#ccflags-y := -Werror
11241
11242 lib-$(CONFIG_SPARC32) += ashrdi3.o
11243 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11244diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11245index 05dac43..76f8ed4 100644
11246--- a/arch/sparc/lib/atomic_64.S
11247+++ b/arch/sparc/lib/atomic_64.S
11248@@ -15,11 +15,22 @@
11249 * a value and does the barriers.
11250 */
11251
11252-#define ATOMIC_OP(op) \
11253-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11254+#ifdef CONFIG_PAX_REFCOUNT
11255+#define __REFCOUNT_OP(op) op##cc
11256+#define __OVERFLOW_IOP tvs %icc, 6;
11257+#define __OVERFLOW_XOP tvs %xcc, 6;
11258+#else
11259+#define __REFCOUNT_OP(op) op
11260+#define __OVERFLOW_IOP
11261+#define __OVERFLOW_XOP
11262+#endif
11263+
11264+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11265+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11266 BACKOFF_SETUP(%o2); \
11267 1: lduw [%o1], %g1; \
11268- op %g1, %o0, %g7; \
11269+ asm_op %g1, %o0, %g7; \
11270+ post_op \
11271 cas [%o1], %g1, %g7; \
11272 cmp %g1, %g7; \
11273 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11274@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11275 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11276 ENDPROC(atomic_##op); \
11277
11278-#define ATOMIC_OP_RETURN(op) \
11279-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11280+#define ATOMIC_OP(op) __ATOMIC_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
11281+	__ATOMIC_OP(op, _unchecked, op, )
11282+
11283+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11284+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11285 BACKOFF_SETUP(%o2); \
11286 1: lduw [%o1], %g1; \
11287- op %g1, %o0, %g7; \
11288+ asm_op %g1, %o0, %g7; \
11289+ post_op \
11290 cas [%o1], %g1, %g7; \
11291 cmp %g1, %g7; \
11292 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11293@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11294 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11295 ENDPROC(atomic_##op##_return);
11296
11297+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
11298+	__ATOMIC_OP_RETURN(op, _unchecked, op, )
11299+
11300 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11301
11302 ATOMIC_OPS(add)
11303@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11304
11305 #undef ATOMIC_OPS
11306 #undef ATOMIC_OP_RETURN
11307+#undef __ATOMIC_OP_RETURN
11308 #undef ATOMIC_OP
11309+#undef __ATOMIC_OP
11310
11311-#define ATOMIC64_OP(op) \
11312-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11313+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11314+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11315 BACKOFF_SETUP(%o2); \
11316 1: ldx [%o1], %g1; \
11317- op %g1, %o0, %g7; \
11318+ asm_op %g1, %o0, %g7; \
11319+ post_op \
11320 casx [%o1], %g1, %g7; \
11321 cmp %g1, %g7; \
11322 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11323@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11324 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11325 ENDPROC(atomic64_##op); \
11326
11327-#define ATOMIC64_OP_RETURN(op) \
11328-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11329+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
11330+	__ATOMIC64_OP(op, _unchecked, op, )
11331+
11332+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11333+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11334 BACKOFF_SETUP(%o2); \
11335 1: ldx [%o1], %g1; \
11336- op %g1, %o0, %g7; \
11337+ asm_op %g1, %o0, %g7; \
11338+ post_op \
11339 casx [%o1], %g1, %g7; \
11340 cmp %g1, %g7; \
11341 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11342@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11343 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11344 ENDPROC(atomic64_##op##_return);
11345
11346+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
11347+	__ATOMIC64_OP_RETURN(op, _unchecked, op, )
11348+
11349 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11350
11351 ATOMIC64_OPS(add)
11352@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11353
11354 #undef ATOMIC64_OPS
11355 #undef ATOMIC64_OP_RETURN
11356+#undef __ATOMIC64_OP_RETURN
11357 #undef ATOMIC64_OP
11358+#undef __ATOMIC64_OP
11359+#undef __OVERFLOW_XOP
11360+#undef __OVERFLOW_IOP
11361+#undef __REFCOUNT_OP
11362
11363 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11364 BACKOFF_SETUP(%o2)
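[Editor's note] Under CONFIG_PAX_REFCOUNT, ATOMIC_OP(add) now stamps out two entry points: the plain name carries the overflow trap, the _unchecked twin keeps wrap-around semantics. Hand-expanding the macros above for op = add and trimming to the instructions that differ:

ENTRY(atomic_add)			! checked: traps to sw trap 6 on overflow
1:	lduw	[%o1], %g1
	addcc	%g1, %o0, %g7		! __REFCOUNT_OP(add) -> addcc
	tvs	%icc, 6			! __OVERFLOW_IOP
	cas	[%o1], %g1, %g7
	...
ENDPROC(atomic_add)

ENTRY(atomic_add_unchecked)		! legacy wrap-around semantics
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7		! plain op: no flags, no trap
	cas	[%o1], %g1, %g7
	...
ENDPROC(atomic_add_unchecked)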
11365diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11366index 1d649a9..fbc5bfc 100644
11367--- a/arch/sparc/lib/ksyms.c
11368+++ b/arch/sparc/lib/ksyms.c
11369@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11370 /* Atomic counter implementation. */
11371 #define ATOMIC_OP(op) \
11372 EXPORT_SYMBOL(atomic_##op); \
11373-EXPORT_SYMBOL(atomic64_##op);
11374+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11375+EXPORT_SYMBOL(atomic64_##op); \
11376+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11377
11378 #define ATOMIC_OP_RETURN(op) \
11379 EXPORT_SYMBOL(atomic_##op##_return); \
11380@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11381 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11382
11383 ATOMIC_OPS(add)
11384+EXPORT_SYMBOL(atomic_add_return_unchecked);
11385+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11386 ATOMIC_OPS(sub)
11387
11388 #undef ATOMIC_OPS
11389diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11390index 30c3ecc..736f015 100644
11391--- a/arch/sparc/mm/Makefile
11392+++ b/arch/sparc/mm/Makefile
11393@@ -2,7 +2,7 @@
11394 #
11395
11396 asflags-y := -ansi
11397-ccflags-y := -Werror
11398+#ccflags-y := -Werror
11399
11400 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11401 obj-y += fault_$(BITS).o
11402diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11403index 70d8171..274c6c0 100644
11404--- a/arch/sparc/mm/fault_32.c
11405+++ b/arch/sparc/mm/fault_32.c
11406@@ -21,6 +21,9 @@
11407 #include <linux/perf_event.h>
11408 #include <linux/interrupt.h>
11409 #include <linux/kdebug.h>
11410+#include <linux/slab.h>
11411+#include <linux/pagemap.h>
11412+#include <linux/compiler.h>
11413
11414 #include <asm/page.h>
11415 #include <asm/pgtable.h>
11416@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11417 return safe_compute_effective_address(regs, insn);
11418 }
11419
11420+#ifdef CONFIG_PAX_PAGEEXEC
11421+#ifdef CONFIG_PAX_DLRESOLVE
11422+static void pax_emuplt_close(struct vm_area_struct *vma)
11423+{
11424+ vma->vm_mm->call_dl_resolve = 0UL;
11425+}
11426+
11427+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11428+{
11429+ unsigned int *kaddr;
11430+
11431+ vmf->page = alloc_page(GFP_HIGHUSER);
11432+ if (!vmf->page)
11433+ return VM_FAULT_OOM;
11434+
11435+ kaddr = kmap(vmf->page);
11436+ memset(kaddr, 0, PAGE_SIZE);
11437+ kaddr[0] = 0x9DE3BFA8U; /* save */
11438+ flush_dcache_page(vmf->page);
11439+ kunmap(vmf->page);
11440+ return VM_FAULT_MAJOR;
11441+}
11442+
11443+static const struct vm_operations_struct pax_vm_ops = {
11444+ .close = pax_emuplt_close,
11445+ .fault = pax_emuplt_fault
11446+};
11447+
11448+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11449+{
11450+ int ret;
11451+
11452+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11453+ vma->vm_mm = current->mm;
11454+ vma->vm_start = addr;
11455+ vma->vm_end = addr + PAGE_SIZE;
11456+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11457+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11458+ vma->vm_ops = &pax_vm_ops;
11459+
11460+ ret = insert_vm_struct(current->mm, vma);
11461+ if (ret)
11462+ return ret;
11463+
11464+ ++current->mm->total_vm;
11465+ return 0;
11466+}
11467+#endif
11468+
11469+/*
11470+ * PaX: decide what to do with offenders (regs->pc = fault address)
11471+ *
11472+ * returns 1 when task should be killed
11473+ * 2 when patched PLT trampoline was detected
11474+ * 3 when unpatched PLT trampoline was detected
11475+ */
11476+static int pax_handle_fetch_fault(struct pt_regs *regs)
11477+{
11478+
11479+#ifdef CONFIG_PAX_EMUPLT
11480+ int err;
11481+
11482+ do { /* PaX: patched PLT emulation #1 */
11483+ unsigned int sethi1, sethi2, jmpl;
11484+
11485+ err = get_user(sethi1, (unsigned int *)regs->pc);
11486+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11487+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11488+
11489+ if (err)
11490+ break;
11491+
11492+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11493+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11494+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11495+ {
11496+ unsigned int addr;
11497+
11498+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11499+ addr = regs->u_regs[UREG_G1];
11500+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11501+ regs->pc = addr;
11502+ regs->npc = addr+4;
11503+ return 2;
11504+ }
11505+ } while (0);
11506+
11507+ do { /* PaX: patched PLT emulation #2 */
11508+ unsigned int ba;
11509+
11510+ err = get_user(ba, (unsigned int *)regs->pc);
11511+
11512+ if (err)
11513+ break;
11514+
11515+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11516+ unsigned int addr;
11517+
11518+ if ((ba & 0xFFC00000U) == 0x30800000U)
11519+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11520+ else
11521+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11522+ regs->pc = addr;
11523+ regs->npc = addr+4;
11524+ return 2;
11525+ }
11526+ } while (0);
11527+
11528+ do { /* PaX: patched PLT emulation #3 */
11529+ unsigned int sethi, bajmpl, nop;
11530+
11531+ err = get_user(sethi, (unsigned int *)regs->pc);
11532+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11533+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11534+
11535+ if (err)
11536+ break;
11537+
11538+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11539+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11540+ nop == 0x01000000U)
11541+ {
11542+ unsigned int addr;
11543+
11544+ addr = (sethi & 0x003FFFFFU) << 10;
11545+ regs->u_regs[UREG_G1] = addr;
11546+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11547+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11548+ else
11549+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11550+ regs->pc = addr;
11551+ regs->npc = addr+4;
11552+ return 2;
11553+ }
11554+ } while (0);
11555+
11556+ do { /* PaX: unpatched PLT emulation step 1 */
11557+ unsigned int sethi, ba, nop;
11558+
11559+ err = get_user(sethi, (unsigned int *)regs->pc);
11560+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11561+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11562+
11563+ if (err)
11564+ break;
11565+
11566+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11567+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11568+ nop == 0x01000000U)
11569+ {
11570+ unsigned int addr, save, call;
11571+
11572+ if ((ba & 0xFFC00000U) == 0x30800000U)
11573+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11574+ else
11575+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11576+
11577+ err = get_user(save, (unsigned int *)addr);
11578+ err |= get_user(call, (unsigned int *)(addr+4));
11579+ err |= get_user(nop, (unsigned int *)(addr+8));
11580+ if (err)
11581+ break;
11582+
11583+#ifdef CONFIG_PAX_DLRESOLVE
11584+ if (save == 0x9DE3BFA8U &&
11585+ (call & 0xC0000000U) == 0x40000000U &&
11586+ nop == 0x01000000U)
11587+ {
11588+ struct vm_area_struct *vma;
11589+ unsigned long call_dl_resolve;
11590+
11591+ down_read(&current->mm->mmap_sem);
11592+ call_dl_resolve = current->mm->call_dl_resolve;
11593+ up_read(&current->mm->mmap_sem);
11594+ if (likely(call_dl_resolve))
11595+ goto emulate;
11596+
11597+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11598+
11599+ down_write(&current->mm->mmap_sem);
11600+ if (current->mm->call_dl_resolve) {
11601+ call_dl_resolve = current->mm->call_dl_resolve;
11602+ up_write(&current->mm->mmap_sem);
11603+ if (vma)
11604+ kmem_cache_free(vm_area_cachep, vma);
11605+ goto emulate;
11606+ }
11607+
11608+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11609+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11610+ up_write(&current->mm->mmap_sem);
11611+ if (vma)
11612+ kmem_cache_free(vm_area_cachep, vma);
11613+ return 1;
11614+ }
11615+
11616+ if (pax_insert_vma(vma, call_dl_resolve)) {
11617+ up_write(&current->mm->mmap_sem);
11618+ kmem_cache_free(vm_area_cachep, vma);
11619+ return 1;
11620+ }
11621+
11622+ current->mm->call_dl_resolve = call_dl_resolve;
11623+ up_write(&current->mm->mmap_sem);
11624+
11625+emulate:
11626+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11627+ regs->pc = call_dl_resolve;
11628+ regs->npc = addr+4;
11629+ return 3;
11630+ }
11631+#endif
11632+
11633+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11634+ if ((save & 0xFFC00000U) == 0x05000000U &&
11635+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11636+ nop == 0x01000000U)
11637+ {
11638+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11639+ regs->u_regs[UREG_G2] = addr + 4;
11640+ addr = (save & 0x003FFFFFU) << 10;
11641+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11642+ regs->pc = addr;
11643+ regs->npc = addr+4;
11644+ return 3;
11645+ }
11646+ }
11647+ } while (0);
11648+
11649+ do { /* PaX: unpatched PLT emulation step 2 */
11650+ unsigned int save, call, nop;
11651+
11652+ err = get_user(save, (unsigned int *)(regs->pc-4));
11653+ err |= get_user(call, (unsigned int *)regs->pc);
11654+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11655+ if (err)
11656+ break;
11657+
11658+ if (save == 0x9DE3BFA8U &&
11659+ (call & 0xC0000000U) == 0x40000000U &&
11660+ nop == 0x01000000U)
11661+ {
11662+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11663+
11664+ regs->u_regs[UREG_RETPC] = regs->pc;
11665+ regs->pc = dl_resolve;
11666+ regs->npc = dl_resolve+4;
11667+ return 3;
11668+ }
11669+ } while (0);
11670+#endif
11671+
11672+ return 1;
11673+}
11674+
11675+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11676+{
11677+ unsigned long i;
11678+
11679+ printk(KERN_ERR "PAX: bytes at PC: ");
11680+ for (i = 0; i < 8; i++) {
11681+ unsigned int c;
11682+ if (get_user(c, (unsigned int *)pc+i))
11683+ printk(KERN_CONT "???????? ");
11684+ else
11685+ printk(KERN_CONT "%08x ", c);
11686+ }
11687+ printk("\n");
11688+}
11689+#endif
11690+
11691 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11692 int text_fault)
11693 {
11694@@ -226,6 +500,24 @@ good_area:
11695 if (!(vma->vm_flags & VM_WRITE))
11696 goto bad_area;
11697 } else {
11698+
11699+#ifdef CONFIG_PAX_PAGEEXEC
11700+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11701+ up_read(&mm->mmap_sem);
11702+ switch (pax_handle_fetch_fault(regs)) {
11703+
11704+#ifdef CONFIG_PAX_EMUPLT
11705+ case 2:
11706+ case 3:
11707+ return;
11708+#endif
11709+
11710+ }
11711+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11712+ do_group_exit(SIGKILL);
11713+ }
11714+#endif
11715+
11716 /* Allow reads even for write-only mappings */
11717 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11718 goto bad_area;
11719diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11720index 4798232..f76e3aa 100644
11721--- a/arch/sparc/mm/fault_64.c
11722+++ b/arch/sparc/mm/fault_64.c
11723@@ -22,6 +22,9 @@
11724 #include <linux/kdebug.h>
11725 #include <linux/percpu.h>
11726 #include <linux/context_tracking.h>
11727+#include <linux/slab.h>
11728+#include <linux/pagemap.h>
11729+#include <linux/compiler.h>
11730
11731 #include <asm/page.h>
11732 #include <asm/pgtable.h>
11733@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11734 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11735 regs->tpc);
11736 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11737- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11738+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11739 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11740 dump_stack();
11741 unhandled_fault(regs->tpc, current, regs);
11742@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11743 show_regs(regs);
11744 }
11745
11746+#ifdef CONFIG_PAX_PAGEEXEC
11747+#ifdef CONFIG_PAX_DLRESOLVE
11748+static void pax_emuplt_close(struct vm_area_struct *vma)
11749+{
11750+ vma->vm_mm->call_dl_resolve = 0UL;
11751+}
11752+
11753+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11754+{
11755+ unsigned int *kaddr;
11756+
11757+ vmf->page = alloc_page(GFP_HIGHUSER);
11758+ if (!vmf->page)
11759+ return VM_FAULT_OOM;
11760+
11761+ kaddr = kmap(vmf->page);
11762+ memset(kaddr, 0, PAGE_SIZE);
11763+ kaddr[0] = 0x9DE3BFA8U; /* save */
11764+ flush_dcache_page(vmf->page);
11765+ kunmap(vmf->page);
11766+ return VM_FAULT_MAJOR;
11767+}
11768+
11769+static const struct vm_operations_struct pax_vm_ops = {
11770+ .close = pax_emuplt_close,
11771+ .fault = pax_emuplt_fault
11772+};
11773+
11774+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11775+{
11776+ int ret;
11777+
11778+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11779+ vma->vm_mm = current->mm;
11780+ vma->vm_start = addr;
11781+ vma->vm_end = addr + PAGE_SIZE;
11782+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11783+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11784+ vma->vm_ops = &pax_vm_ops;
11785+
11786+ ret = insert_vm_struct(current->mm, vma);
11787+ if (ret)
11788+ return ret;
11789+
11790+ ++current->mm->total_vm;
11791+ return 0;
11792+}
11793+#endif
11794+
11795+/*
11796+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11797+ *
11798+ * returns 1 when task should be killed
11799+ * 2 when patched PLT trampoline was detected
11800+ * 3 when unpatched PLT trampoline was detected
11801+ */
11802+static int pax_handle_fetch_fault(struct pt_regs *regs)
11803+{
11804+
11805+#ifdef CONFIG_PAX_EMUPLT
11806+ int err;
11807+
11808+ do { /* PaX: patched PLT emulation #1 */
11809+ unsigned int sethi1, sethi2, jmpl;
11810+
11811+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11812+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11814+
11815+ if (err)
11816+ break;
11817+
11818+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11819+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11820+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11821+ {
11822+ unsigned long addr;
11823+
11824+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11825+ addr = regs->u_regs[UREG_G1];
11826+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11827+
11828+ if (test_thread_flag(TIF_32BIT))
11829+ addr &= 0xFFFFFFFFUL;
11830+
11831+ regs->tpc = addr;
11832+ regs->tnpc = addr+4;
11833+ return 2;
11834+ }
11835+ } while (0);
11836+
11837+ do { /* PaX: patched PLT emulation #2 */
11838+ unsigned int ba;
11839+
11840+ err = get_user(ba, (unsigned int *)regs->tpc);
11841+
11842+ if (err)
11843+ break;
11844+
11845+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11846+ unsigned long addr;
11847+
11848+ if ((ba & 0xFFC00000U) == 0x30800000U)
11849+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11850+ else
11851+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11852+
11853+ if (test_thread_flag(TIF_32BIT))
11854+ addr &= 0xFFFFFFFFUL;
11855+
11856+ regs->tpc = addr;
11857+ regs->tnpc = addr+4;
11858+ return 2;
11859+ }
11860+ } while (0);
11861+
11862+ do { /* PaX: patched PLT emulation #3 */
11863+ unsigned int sethi, bajmpl, nop;
11864+
11865+ err = get_user(sethi, (unsigned int *)regs->tpc);
11866+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11867+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11868+
11869+ if (err)
11870+ break;
11871+
11872+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11873+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11874+ nop == 0x01000000U)
11875+ {
11876+ unsigned long addr;
11877+
11878+ addr = (sethi & 0x003FFFFFU) << 10;
11879+ regs->u_regs[UREG_G1] = addr;
11880+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11881+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11882+ else
11883+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11884+
11885+ if (test_thread_flag(TIF_32BIT))
11886+ addr &= 0xFFFFFFFFUL;
11887+
11888+ regs->tpc = addr;
11889+ regs->tnpc = addr+4;
11890+ return 2;
11891+ }
11892+ } while (0);
11893+
11894+ do { /* PaX: patched PLT emulation #4 */
11895+ unsigned int sethi, mov1, call, mov2;
11896+
11897+ err = get_user(sethi, (unsigned int *)regs->tpc);
11898+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11899+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11900+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11901+
11902+ if (err)
11903+ break;
11904+
11905+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11906+ mov1 == 0x8210000FU &&
11907+ (call & 0xC0000000U) == 0x40000000U &&
11908+ mov2 == 0x9E100001U)
11909+ {
11910+ unsigned long addr;
11911+
11912+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11913+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11914+
11915+ if (test_thread_flag(TIF_32BIT))
11916+ addr &= 0xFFFFFFFFUL;
11917+
11918+ regs->tpc = addr;
11919+ regs->tnpc = addr+4;
11920+ return 2;
11921+ }
11922+ } while (0);
11923+
11924+ do { /* PaX: patched PLT emulation #5 */
11925+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11926+
11927+ err = get_user(sethi, (unsigned int *)regs->tpc);
11928+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11929+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11930+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11931+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11932+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11933+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11934+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11935+
11936+ if (err)
11937+ break;
11938+
11939+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11940+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11941+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11942+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11943+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11944+ sllx == 0x83287020U &&
11945+ jmpl == 0x81C04005U &&
11946+ nop == 0x01000000U)
11947+ {
11948+ unsigned long addr;
11949+
11950+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11951+ regs->u_regs[UREG_G1] <<= 32;
11952+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11953+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11954+ regs->tpc = addr;
11955+ regs->tnpc = addr+4;
11956+ return 2;
11957+ }
11958+ } while (0);
11959+
11960+ do { /* PaX: patched PLT emulation #6 */
11961+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11962+
11963+ err = get_user(sethi, (unsigned int *)regs->tpc);
11964+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11965+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11966+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11967+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11968+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11969+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11970+
11971+ if (err)
11972+ break;
11973+
11974+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11975+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11976+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11977+ sllx == 0x83287020U &&
11978+ (or & 0xFFFFE000U) == 0x8A116000U &&
11979+ jmpl == 0x81C04005U &&
11980+ nop == 0x01000000U)
11981+ {
11982+ unsigned long addr;
11983+
11984+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11985+ regs->u_regs[UREG_G1] <<= 32;
11986+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11987+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11988+ regs->tpc = addr;
11989+ regs->tnpc = addr+4;
11990+ return 2;
11991+ }
11992+ } while (0);
11993+
11994+ do { /* PaX: unpatched PLT emulation step 1 */
11995+ unsigned int sethi, ba, nop;
11996+
11997+ err = get_user(sethi, (unsigned int *)regs->tpc);
11998+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11999+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12000+
12001+ if (err)
12002+ break;
12003+
12004+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12005+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12006+ nop == 0x01000000U)
12007+ {
12008+ unsigned long addr;
12009+ unsigned int save, call;
12010+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12011+
12012+ if ((ba & 0xFFC00000U) == 0x30800000U)
12013+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12014+ else
12015+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12016+
12017+ if (test_thread_flag(TIF_32BIT))
12018+ addr &= 0xFFFFFFFFUL;
12019+
12020+ err = get_user(save, (unsigned int *)addr);
12021+ err |= get_user(call, (unsigned int *)(addr+4));
12022+ err |= get_user(nop, (unsigned int *)(addr+8));
12023+ if (err)
12024+ break;
12025+
12026+#ifdef CONFIG_PAX_DLRESOLVE
12027+ if (save == 0x9DE3BFA8U &&
12028+ (call & 0xC0000000U) == 0x40000000U &&
12029+ nop == 0x01000000U)
12030+ {
12031+ struct vm_area_struct *vma;
12032+ unsigned long call_dl_resolve;
12033+
12034+ down_read(&current->mm->mmap_sem);
12035+ call_dl_resolve = current->mm->call_dl_resolve;
12036+ up_read(&current->mm->mmap_sem);
12037+ if (likely(call_dl_resolve))
12038+ goto emulate;
12039+
12040+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12041+
12042+ down_write(&current->mm->mmap_sem);
12043+ if (current->mm->call_dl_resolve) {
12044+ call_dl_resolve = current->mm->call_dl_resolve;
12045+ up_write(&current->mm->mmap_sem);
12046+ if (vma)
12047+ kmem_cache_free(vm_area_cachep, vma);
12048+ goto emulate;
12049+ }
12050+
12051+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12052+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12053+ up_write(&current->mm->mmap_sem);
12054+ if (vma)
12055+ kmem_cache_free(vm_area_cachep, vma);
12056+ return 1;
12057+ }
12058+
12059+ if (pax_insert_vma(vma, call_dl_resolve)) {
12060+ up_write(&current->mm->mmap_sem);
12061+ kmem_cache_free(vm_area_cachep, vma);
12062+ return 1;
12063+ }
12064+
12065+ current->mm->call_dl_resolve = call_dl_resolve;
12066+ up_write(&current->mm->mmap_sem);
12067+
12068+emulate:
12069+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12070+ regs->tpc = call_dl_resolve;
12071+ regs->tnpc = addr+4;
12072+ return 3;
12073+ }
12074+#endif
12075+
12076+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12077+ if ((save & 0xFFC00000U) == 0x05000000U &&
12078+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12079+ nop == 0x01000000U)
12080+ {
12081+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12082+ regs->u_regs[UREG_G2] = addr + 4;
12083+ addr = (save & 0x003FFFFFU) << 10;
12084+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12085+
12086+ if (test_thread_flag(TIF_32BIT))
12087+ addr &= 0xFFFFFFFFUL;
12088+
12089+ regs->tpc = addr;
12090+ regs->tnpc = addr+4;
12091+ return 3;
12092+ }
12093+
12094+ /* PaX: 64-bit PLT stub */
12095+ err = get_user(sethi1, (unsigned int *)addr);
12096+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12097+ err |= get_user(or1, (unsigned int *)(addr+8));
12098+ err |= get_user(or2, (unsigned int *)(addr+12));
12099+ err |= get_user(sllx, (unsigned int *)(addr+16));
12100+ err |= get_user(add, (unsigned int *)(addr+20));
12101+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12102+ err |= get_user(nop, (unsigned int *)(addr+28));
12103+ if (err)
12104+ break;
12105+
12106+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12107+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12108+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12109+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12110+ sllx == 0x89293020U &&
12111+ add == 0x8A010005U &&
12112+ jmpl == 0x89C14000U &&
12113+ nop == 0x01000000U)
12114+ {
12115+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12116+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12117+ regs->u_regs[UREG_G4] <<= 32;
12118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12119+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12120+ regs->u_regs[UREG_G4] = addr + 24;
12121+ addr = regs->u_regs[UREG_G5];
12122+ regs->tpc = addr;
12123+ regs->tnpc = addr+4;
12124+ return 3;
12125+ }
12126+ }
12127+ } while (0);
12128+
12129+#ifdef CONFIG_PAX_DLRESOLVE
12130+ do { /* PaX: unpatched PLT emulation step 2 */
12131+ unsigned int save, call, nop;
12132+
12133+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12134+ err |= get_user(call, (unsigned int *)regs->tpc);
12135+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12136+ if (err)
12137+ break;
12138+
12139+ if (save == 0x9DE3BFA8U &&
12140+ (call & 0xC0000000U) == 0x40000000U &&
12141+ nop == 0x01000000U)
12142+ {
12143+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12144+
12145+ if (test_thread_flag(TIF_32BIT))
12146+ dl_resolve &= 0xFFFFFFFFUL;
12147+
12148+ regs->u_regs[UREG_RETPC] = regs->tpc;
12149+ regs->tpc = dl_resolve;
12150+ regs->tnpc = dl_resolve+4;
12151+ return 3;
12152+ }
12153+ } while (0);
12154+#endif
12155+
12156+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12157+ unsigned int sethi, ba, nop;
12158+
12159+ err = get_user(sethi, (unsigned int *)regs->tpc);
12160+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12161+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12162+
12163+ if (err)
12164+ break;
12165+
12166+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12167+ (ba & 0xFFF00000U) == 0x30600000U &&
12168+ nop == 0x01000000U)
12169+ {
12170+ unsigned long addr;
12171+
12172+ addr = (sethi & 0x003FFFFFU) << 10;
12173+ regs->u_regs[UREG_G1] = addr;
12174+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12175+
12176+ if (test_thread_flag(TIF_32BIT))
12177+ addr &= 0xFFFFFFFFUL;
12178+
12179+ regs->tpc = addr;
12180+ regs->tnpc = addr+4;
12181+ return 2;
12182+ }
12183+ } while (0);
12184+
12185+#endif
12186+
12187+ return 1;
12188+}
12189+
12190+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12191+{
12192+ unsigned long i;
12193+
12194+ printk(KERN_ERR "PAX: bytes at PC: ");
12195+ for (i = 0; i < 8; i++) {
12196+ unsigned int c;
12197+ if (get_user(c, (unsigned int *)pc+i))
12198+ printk(KERN_CONT "???????? ");
12199+ else
12200+ printk(KERN_CONT "%08x ", c);
12201+ }
12202+ printk("\n");
12203+}
12204+#endif
12205+
12206 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12207 {
12208 enum ctx_state prev_state = exception_enter();
12209@@ -353,6 +816,29 @@ retry:
12210 if (!vma)
12211 goto bad_area;
12212
12213+#ifdef CONFIG_PAX_PAGEEXEC
12214+ /* PaX: detect ITLB misses on non-exec pages */
12215+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12216+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12217+ {
12218+ if (address != regs->tpc)
12219+ goto good_area;
12220+
12221+ up_read(&mm->mmap_sem);
12222+ switch (pax_handle_fetch_fault(regs)) {
12223+
12224+#ifdef CONFIG_PAX_EMUPLT
12225+ case 2:
12226+ case 3:
12227+ return;
12228+#endif
12229+
12230+ }
12231+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12232+ do_group_exit(SIGKILL);
12233+ }
12234+#endif
12235+
12236 /* Pure DTLB misses do not tell us whether the fault causing
12237 * load/store/atomic was a write or not, it only says that there
12238 * was no match. So in such a case we (carefully) read the
12239diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12240index d329537..2c3746a 100644
12241--- a/arch/sparc/mm/hugetlbpage.c
12242+++ b/arch/sparc/mm/hugetlbpage.c
12243@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12244 unsigned long addr,
12245 unsigned long len,
12246 unsigned long pgoff,
12247- unsigned long flags)
12248+ unsigned long flags,
12249+ unsigned long offset)
12250 {
12251+ struct mm_struct *mm = current->mm;
12252 unsigned long task_size = TASK_SIZE;
12253 struct vm_unmapped_area_info info;
12254
12255@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12256
12257 info.flags = 0;
12258 info.length = len;
12259- info.low_limit = TASK_UNMAPPED_BASE;
12260+ info.low_limit = mm->mmap_base;
12261 info.high_limit = min(task_size, VA_EXCLUDE_START);
12262 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12263 info.align_offset = 0;
12264+ info.threadstack_offset = offset;
12265 addr = vm_unmapped_area(&info);
12266
12267 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12268 VM_BUG_ON(addr != -ENOMEM);
12269 info.low_limit = VA_EXCLUDE_END;
12270+
12271+#ifdef CONFIG_PAX_RANDMMAP
12272+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12273+ info.low_limit += mm->delta_mmap;
12274+#endif
12275+
12276 info.high_limit = task_size;
12277 addr = vm_unmapped_area(&info);
12278 }
12279@@ -55,7 +64,8 @@ static unsigned long
12280 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12281 const unsigned long len,
12282 const unsigned long pgoff,
12283- const unsigned long flags)
12284+ const unsigned long flags,
12285+ const unsigned long offset)
12286 {
12287 struct mm_struct *mm = current->mm;
12288 unsigned long addr = addr0;
12289@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12290 info.high_limit = mm->mmap_base;
12291 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12292 info.align_offset = 0;
12293+ info.threadstack_offset = offset;
12294 addr = vm_unmapped_area(&info);
12295
12296 /*
12297@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12298 VM_BUG_ON(addr != -ENOMEM);
12299 info.flags = 0;
12300 info.low_limit = TASK_UNMAPPED_BASE;
12301+
12302+#ifdef CONFIG_PAX_RANDMMAP
12303+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12304+ info.low_limit += mm->delta_mmap;
12305+#endif
12306+
12307 info.high_limit = STACK_TOP32;
12308 addr = vm_unmapped_area(&info);
12309 }
12310@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12311 struct mm_struct *mm = current->mm;
12312 struct vm_area_struct *vma;
12313 unsigned long task_size = TASK_SIZE;
12314+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12315
12316 if (test_thread_flag(TIF_32BIT))
12317 task_size = STACK_TOP32;
12318@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12319 return addr;
12320 }
12321
12322+#ifdef CONFIG_PAX_RANDMMAP
12323+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12324+#endif
12325+
12326 if (addr) {
12327 addr = ALIGN(addr, HPAGE_SIZE);
12328 vma = find_vma(mm, addr);
12329- if (task_size - len >= addr &&
12330- (!vma || addr + len <= vma->vm_start))
12331+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12332 return addr;
12333 }
12334 if (mm->get_unmapped_area == arch_get_unmapped_area)
12335 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12336- pgoff, flags);
12337+ pgoff, flags, offset);
12338 else
12339 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12340- pgoff, flags);
12341+ pgoff, flags, offset);
12342 }
12343
12344 pte_t *huge_pte_alloc(struct mm_struct *mm,
12345diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12346index 3ea267c..93f0659 100644
12347--- a/arch/sparc/mm/init_64.c
12348+++ b/arch/sparc/mm/init_64.c
12349@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12350 int num_kernel_image_mappings;
12351
12352 #ifdef CONFIG_DEBUG_DCFLUSH
12353-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12354+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12355 #ifdef CONFIG_SMP
12356-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12357+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12358 #endif
12359 #endif
12360
12361@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12362 {
12363 BUG_ON(tlb_type == hypervisor);
12364 #ifdef CONFIG_DEBUG_DCFLUSH
12365- atomic_inc(&dcpage_flushes);
12366+ atomic_inc_unchecked(&dcpage_flushes);
12367 #endif
12368
12369 #ifdef DCACHE_ALIASING_POSSIBLE
12370@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12371
12372 #ifdef CONFIG_DEBUG_DCFLUSH
12373 seq_printf(m, "DCPageFlushes\t: %d\n",
12374- atomic_read(&dcpage_flushes));
12375+ atomic_read_unchecked(&dcpage_flushes));
12376 #ifdef CONFIG_SMP
12377 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12378- atomic_read(&dcpage_flushes_xcall));
12379+ atomic_read_unchecked(&dcpage_flushes_xcall));
12380 #endif /* CONFIG_SMP */
12381 #endif /* CONFIG_DEBUG_DCFLUSH */
12382 }
12383diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12384index 7cca418..53fc030 100644
12385--- a/arch/tile/Kconfig
12386+++ b/arch/tile/Kconfig
12387@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12388
12389 config KEXEC
12390 bool "kexec system call"
12391+ depends on !GRKERNSEC_KMEM
12392 ---help---
12393 kexec is a system call that implements the ability to shutdown your
12394 current kernel, and to start another kernel. It is like a reboot
12395diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12396index 7b11c5f..755a026 100644
12397--- a/arch/tile/include/asm/atomic_64.h
12398+++ b/arch/tile/include/asm/atomic_64.h
12399@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12400
12401 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12402
12403+#define atomic64_read_unchecked(v) atomic64_read(v)
12404+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12405+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12406+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12407+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12408+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12409+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12410+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12411+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12412+
12413 /* Define this to indicate that cmpxchg is an efficient operation. */
12414 #define __HAVE_ARCH_CMPXCHG
12415
12416diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12417index 6160761..00cac88 100644
12418--- a/arch/tile/include/asm/cache.h
12419+++ b/arch/tile/include/asm/cache.h
12420@@ -15,11 +15,12 @@
12421 #ifndef _ASM_TILE_CACHE_H
12422 #define _ASM_TILE_CACHE_H
12423
12424+#include <linux/const.h>
12425 #include <arch/chip.h>
12426
12427 /* bytes per L1 data cache line */
12428 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12430+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12431
12432 /* bytes per L2 cache line */
12433 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12434diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12435index b6cde32..c0cb736 100644
12436--- a/arch/tile/include/asm/uaccess.h
12437+++ b/arch/tile/include/asm/uaccess.h
12438@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12439 const void __user *from,
12440 unsigned long n)
12441 {
12442- int sz = __compiletime_object_size(to);
12443+ size_t sz = __compiletime_object_size(to);
12444
12445- if (likely(sz == -1 || sz >= n))
12446+ if (likely(sz == (size_t)-1 || sz >= n))
12447 n = _copy_from_user(to, from, n);
12448 else
12449 copy_from_user_overflow();
12450diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12451index 3270e00..a77236e 100644
12452--- a/arch/tile/mm/hugetlbpage.c
12453+++ b/arch/tile/mm/hugetlbpage.c
12454@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12455 info.high_limit = TASK_SIZE;
12456 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12457 info.align_offset = 0;
12458+ info.threadstack_offset = 0;
12459 return vm_unmapped_area(&info);
12460 }
12461
12462@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12463 info.high_limit = current->mm->mmap_base;
12464 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12465 info.align_offset = 0;
12466+ info.threadstack_offset = 0;
12467 addr = vm_unmapped_area(&info);
12468
12469 /*
12470diff --git a/arch/um/Makefile b/arch/um/Makefile
12471index e4b1a96..16162f8 100644
12472--- a/arch/um/Makefile
12473+++ b/arch/um/Makefile
12474@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12475 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12476 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12477
12478+ifdef CONSTIFY_PLUGIN
12479+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12480+endif
12481+
12482 #This will adjust *FLAGS accordingly to the platform.
12483 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12484
12485diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12486index 19e1bdd..3665b77 100644
12487--- a/arch/um/include/asm/cache.h
12488+++ b/arch/um/include/asm/cache.h
12489@@ -1,6 +1,7 @@
12490 #ifndef __UM_CACHE_H
12491 #define __UM_CACHE_H
12492
12493+#include <linux/const.h>
12494
12495 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12496 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12497@@ -12,6 +13,6 @@
12498 # define L1_CACHE_SHIFT 5
12499 #endif
12500
12501-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12502+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12503
12504 #endif
12505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12506index 2e0a6b1..a64d0f5 100644
12507--- a/arch/um/include/asm/kmap_types.h
12508+++ b/arch/um/include/asm/kmap_types.h
12509@@ -8,6 +8,6 @@
12510
12511 /* No more #include "asm/arch/kmap_types.h" ! */
12512
12513-#define KM_TYPE_NR 14
12514+#define KM_TYPE_NR 15
12515
12516 #endif
12517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12518index 71c5d13..4c7b9f1 100644
12519--- a/arch/um/include/asm/page.h
12520+++ b/arch/um/include/asm/page.h
12521@@ -14,6 +14,9 @@
12522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12523 #define PAGE_MASK (~(PAGE_SIZE-1))
12524
12525+#define ktla_ktva(addr) (addr)
12526+#define ktva_ktla(addr) (addr)
12527+
12528 #ifndef __ASSEMBLY__
12529
12530 struct page;
12531diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12532index 0032f92..cd151e0 100644
12533--- a/arch/um/include/asm/pgtable-3level.h
12534+++ b/arch/um/include/asm/pgtable-3level.h
12535@@ -58,6 +58,7 @@
12536 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12537 #define pud_populate(mm, pud, pmd) \
12538 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12539+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12540
12541 #ifdef CONFIG_64BIT
12542 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12543diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12544index f17bca8..48adb87 100644
12545--- a/arch/um/kernel/process.c
12546+++ b/arch/um/kernel/process.c
12547@@ -356,22 +356,6 @@ int singlestepping(void * t)
12548 return 2;
12549 }
12550
12551-/*
12552- * Only x86 and x86_64 have an arch_align_stack().
12553- * All other arches have "#define arch_align_stack(x) (x)"
12554- * in their asm/exec.h
12555- * As this is included in UML from asm-um/system-generic.h,
12556- * we can use it to behave as the subarch does.
12557- */
12558-#ifndef arch_align_stack
12559-unsigned long arch_align_stack(unsigned long sp)
12560-{
12561- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12562- sp -= get_random_int() % 8192;
12563- return sp & ~0xf;
12564-}
12565-#endif
12566-
12567 unsigned long get_wchan(struct task_struct *p)
12568 {
12569 unsigned long stack_page, sp, ip;
12570diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12571index ad8f795..2c7eec6 100644
12572--- a/arch/unicore32/include/asm/cache.h
12573+++ b/arch/unicore32/include/asm/cache.h
12574@@ -12,8 +12,10 @@
12575 #ifndef __UNICORE_CACHE_H__
12576 #define __UNICORE_CACHE_H__
12577
12578-#define L1_CACHE_SHIFT (5)
12579-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12580+#include <linux/const.h>
12581+
12582+#define L1_CACHE_SHIFT 5
12583+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12584
12585 /*
12586 * Memory returned by kmalloc() may be used for DMA, so we must make
12587diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12588index 0dc9d01..98df103 100644
12589--- a/arch/x86/Kconfig
12590+++ b/arch/x86/Kconfig
12591@@ -130,7 +130,7 @@ config X86
12592 select RTC_LIB
12593 select HAVE_DEBUG_STACKOVERFLOW
12594 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12595- select HAVE_CC_STACKPROTECTOR
12596+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12597 select GENERIC_CPU_AUTOPROBE
12598 select HAVE_ARCH_AUDITSYSCALL
12599 select ARCH_SUPPORTS_ATOMIC_RMW
12600@@ -263,7 +263,7 @@ config X86_HT
12601
12602 config X86_32_LAZY_GS
12603 def_bool y
12604- depends on X86_32 && !CC_STACKPROTECTOR
12605+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12606
12607 config ARCH_HWEIGHT_CFLAGS
12608 string
12609@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12610
12611 menuconfig HYPERVISOR_GUEST
12612 bool "Linux guest support"
12613+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12614 ---help---
12615 Say Y here to enable options for running Linux under various hyper-
12616 visors. This option enables basic hypervisor detection and platform
12617@@ -978,6 +979,7 @@ config VM86
12618
12619 config X86_16BIT
12620 bool "Enable support for 16-bit segments" if EXPERT
12621+ depends on !GRKERNSEC
12622 default y
12623 ---help---
12624 This option is required by programs like Wine to run 16-bit
12625@@ -1151,6 +1153,7 @@ choice
12626
12627 config NOHIGHMEM
12628 bool "off"
12629+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12630 ---help---
12631 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12632 However, the address space of 32-bit x86 processors is only 4
12633@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12634
12635 config HIGHMEM4G
12636 bool "4GB"
12637+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12638 ---help---
12639 Select this if you have a 32-bit processor and between 1 and 4
12640 gigabytes of physical RAM.
12641@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12642 hex
12643 default 0xB0000000 if VMSPLIT_3G_OPT
12644 default 0x80000000 if VMSPLIT_2G
12645- default 0x78000000 if VMSPLIT_2G_OPT
12646+ default 0x70000000 if VMSPLIT_2G_OPT
12647 default 0x40000000 if VMSPLIT_1G
12648 default 0xC0000000
12649 depends on X86_32
12650@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12651
12652 config KEXEC
12653 bool "kexec system call"
12654+ depends on !GRKERNSEC_KMEM
12655 ---help---
12656 kexec is a system call that implements the ability to shutdown your
12657 current kernel, and to start another kernel. It is like a reboot
12658@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12659
12660 config PHYSICAL_ALIGN
12661 hex "Alignment value to which kernel should be aligned"
12662- default "0x200000"
12663+ default "0x1000000"
12664+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12665+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12666 range 0x2000 0x1000000 if X86_32
12667 range 0x200000 0x1000000 if X86_64
12668 ---help---
12669@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12670 def_bool n
12671 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12672 depends on X86_32 || IA32_EMULATION
12673+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12674 ---help---
12675 Certain buggy versions of glibc will crash if they are
12676 presented with a 32-bit vDSO that is not mapped at the address
12677diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12678index 6983314..54ad7e8 100644
12679--- a/arch/x86/Kconfig.cpu
12680+++ b/arch/x86/Kconfig.cpu
12681@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12682
12683 config X86_F00F_BUG
12684 def_bool y
12685- depends on M586MMX || M586TSC || M586 || M486
12686+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12687
12688 config X86_INVD_BUG
12689 def_bool y
12690@@ -327,7 +327,7 @@ config X86_INVD_BUG
12691
12692 config X86_ALIGNMENT_16
12693 def_bool y
12694- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12695+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12696
12697 config X86_INTEL_USERCOPY
12698 def_bool y
12699@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12700 # generates cmov.
12701 config X86_CMOV
12702 def_bool y
12703- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12704+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12705
12706 config X86_MINIMUM_CPU_FAMILY
12707 int
12708diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12709index 61bd2ad..50b625d 100644
12710--- a/arch/x86/Kconfig.debug
12711+++ b/arch/x86/Kconfig.debug
12712@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12713 config DEBUG_RODATA
12714 bool "Write protect kernel read-only data structures"
12715 default y
12716- depends on DEBUG_KERNEL
12717+ depends on DEBUG_KERNEL && BROKEN
12718 ---help---
12719 Mark the kernel read-only data as write-protected in the pagetables,
12720 in order to catch accidental (and incorrect) writes to such const
12721@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12722
12723 config DEBUG_SET_MODULE_RONX
12724 bool "Set loadable kernel module data as NX and text as RO"
12725- depends on MODULES
12726+ depends on MODULES && BROKEN
12727 ---help---
12728 This option helps catch unintended modifications to loadable
12729 kernel module's text and read-only data. It also prevents execution
12730diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12731index 920e616..ac3d4df 100644
12732--- a/arch/x86/Makefile
12733+++ b/arch/x86/Makefile
12734@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12735 # CPU-specific tuning. Anything which can be shared with UML should go here.
12736 include $(srctree)/arch/x86/Makefile_32.cpu
12737 KBUILD_CFLAGS += $(cflags-y)
12738-
12739- # temporary until string.h is fixed
12740- KBUILD_CFLAGS += -ffreestanding
12741 else
12742 BITS := 64
12743 UTS_MACHINE := x86_64
12744@@ -107,6 +104,9 @@ else
12745 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12746 endif
12747
12748+# temporary until string.h is fixed
12749+KBUILD_CFLAGS += -ffreestanding
12750+
12751 # Make sure compiler does not have buggy stack-protector support.
12752 ifdef CONFIG_CC_STACKPROTECTOR
12753 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12754@@ -180,6 +180,7 @@ archheaders:
12755 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12756
12757 archprepare:
12758+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12759 ifeq ($(CONFIG_KEXEC_FILE),y)
12760 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12761 endif
12762@@ -263,3 +264,9 @@ define archhelp
12763 echo ' FDARGS="..." arguments for the booted kernel'
12764 echo ' FDINITRD=file initrd for the booted kernel'
12765 endef
12766+
12767+define OLD_LD
12768+
12769+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12770+*** Please upgrade your binutils to 2.18 or newer
12771+endef
12772diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12773index 3db07f3..9d81d0f 100644
12774--- a/arch/x86/boot/Makefile
12775+++ b/arch/x86/boot/Makefile
12776@@ -56,6 +56,9 @@ clean-files += cpustr.h
12777 # ---------------------------------------------------------------------------
12778
12779 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12780+ifdef CONSTIFY_PLUGIN
12781+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12782+endif
12783 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12784 GCOV_PROFILE := n
12785
12786diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12787index 878e4b9..20537ab 100644
12788--- a/arch/x86/boot/bitops.h
12789+++ b/arch/x86/boot/bitops.h
12790@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12791 u8 v;
12792 const u32 *p = (const u32 *)addr;
12793
12794- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12795+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12796 return v;
12797 }
12798
12799@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12800
12801 static inline void set_bit(int nr, void *addr)
12802 {
12803- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12804+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12805 }
12806
12807 #endif /* BOOT_BITOPS_H */
12808diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12809index bd49ec6..94c7f58 100644
12810--- a/arch/x86/boot/boot.h
12811+++ b/arch/x86/boot/boot.h
12812@@ -84,7 +84,7 @@ static inline void io_delay(void)
12813 static inline u16 ds(void)
12814 {
12815 u16 seg;
12816- asm("movw %%ds,%0" : "=rm" (seg));
12817+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12818 return seg;
12819 }
12820
12821diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12822index 8bd44e8..6b111e9 100644
12823--- a/arch/x86/boot/compressed/Makefile
12824+++ b/arch/x86/boot/compressed/Makefile
12825@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12826 KBUILD_CFLAGS += -mno-mmx -mno-sse
12827 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12828 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12829+ifdef CONSTIFY_PLUGIN
12830+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12831+endif
12832
12833 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12834 GCOV_PROFILE := n
12835diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12836index a53440e..c3dbf1e 100644
12837--- a/arch/x86/boot/compressed/efi_stub_32.S
12838+++ b/arch/x86/boot/compressed/efi_stub_32.S
12839@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12840 * parameter 2, ..., param n. To make things easy, we save the return
12841 * address of efi_call_phys in a global variable.
12842 */
12843- popl %ecx
12844- movl %ecx, saved_return_addr(%edx)
12845- /* get the function pointer into ECX*/
12846- popl %ecx
12847- movl %ecx, efi_rt_function_ptr(%edx)
12848+ popl saved_return_addr(%edx)
12849+ popl efi_rt_function_ptr(%edx)
12850
12851 /*
12852 * 3. Call the physical function.
12853 */
12854- call *%ecx
12855+ call *efi_rt_function_ptr(%edx)
12856
12857 /*
12858 * 4. Balance the stack. And because EAX contain the return value,
12859@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12860 1: popl %edx
12861 subl $1b, %edx
12862
12863- movl efi_rt_function_ptr(%edx), %ecx
12864- pushl %ecx
12865+ pushl efi_rt_function_ptr(%edx)
12866
12867 /*
12868 * 10. Push the saved return address onto the stack and return.
12869 */
12870- movl saved_return_addr(%edx), %ecx
12871- pushl %ecx
12872- ret
12873+ jmpl *saved_return_addr(%edx)
12874 ENDPROC(efi_call_phys)
12875 .previous
12876
12877diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12878index 630384a..278e788 100644
12879--- a/arch/x86/boot/compressed/efi_thunk_64.S
12880+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12881@@ -189,8 +189,8 @@ efi_gdt64:
12882 .long 0 /* Filled out by user */
12883 .word 0
12884 .quad 0x0000000000000000 /* NULL descriptor */
12885- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12886- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12887+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12888+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12889 .quad 0x0080890000000000 /* TS descriptor */
12890 .quad 0x0000000000000000 /* TS continued */
12891 efi_gdt64_end:
12892diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12893index 1d7fbbc..36ecd58 100644
12894--- a/arch/x86/boot/compressed/head_32.S
12895+++ b/arch/x86/boot/compressed/head_32.S
12896@@ -140,10 +140,10 @@ preferred_addr:
12897 addl %eax, %ebx
12898 notl %eax
12899 andl %eax, %ebx
12900- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12901+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12902 jge 1f
12903 #endif
12904- movl $LOAD_PHYSICAL_ADDR, %ebx
12905+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12906 1:
12907
12908 /* Target address to relocate to for decompression */
12909diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12910index 6b1766c..ad465c9 100644
12911--- a/arch/x86/boot/compressed/head_64.S
12912+++ b/arch/x86/boot/compressed/head_64.S
12913@@ -94,10 +94,10 @@ ENTRY(startup_32)
12914 addl %eax, %ebx
12915 notl %eax
12916 andl %eax, %ebx
12917- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12918+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12919 jge 1f
12920 #endif
12921- movl $LOAD_PHYSICAL_ADDR, %ebx
12922+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12923 1:
12924
12925 /* Target address to relocate to for decompression */
12926@@ -322,10 +322,10 @@ preferred_addr:
12927 addq %rax, %rbp
12928 notq %rax
12929 andq %rax, %rbp
12930- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12931+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12932 jge 1f
12933 #endif
12934- movq $LOAD_PHYSICAL_ADDR, %rbp
12935+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12936 1:
12937
12938 /* Target address to relocate to for decompression */
12939@@ -434,8 +434,8 @@ gdt:
12940 .long gdt
12941 .word 0
12942 .quad 0x0000000000000000 /* NULL descriptor */
12943- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12944- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12945+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12946+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12947 .quad 0x0080890000000000 /* TS descriptor */
12948 .quad 0x0000000000000000 /* TS continued */
12949 gdt_end:
12950diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12951index a950864..c710239 100644
12952--- a/arch/x86/boot/compressed/misc.c
12953+++ b/arch/x86/boot/compressed/misc.c
12954@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12955 * Calculate the delta between where vmlinux was linked to load
12956 * and where it was actually loaded.
12957 */
12958- delta = min_addr - LOAD_PHYSICAL_ADDR;
12959+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12960 if (!delta) {
12961 debug_putstr("No relocation needed... ");
12962 return;
12963@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12964 Elf32_Ehdr ehdr;
12965 Elf32_Phdr *phdrs, *phdr;
12966 #endif
12967- void *dest;
12968+ void *dest, *prev;
12969 int i;
12970
12971 memcpy(&ehdr, output, sizeof(ehdr));
12972@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12973 case PT_LOAD:
12974 #ifdef CONFIG_RELOCATABLE
12975 dest = output;
12976- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12977+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12978 #else
12979 dest = (void *)(phdr->p_paddr);
12980 #endif
12981 memcpy(dest,
12982 output + phdr->p_offset,
12983 phdr->p_filesz);
12984+ if (i)
12985+ memset(prev, 0xff, dest - prev);
12986+ prev = dest + phdr->p_filesz;
12987 break;
12988 default: /* Ignore other PT_* */ break;
12989 }
12990@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12991 error("Destination address too large");
12992 #endif
12993 #ifndef CONFIG_RELOCATABLE
12994- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12995+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12996 error("Wrong destination address");
12997 #endif
12998
12999diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13000index 1fd7d57..0f7d096 100644
13001--- a/arch/x86/boot/cpucheck.c
13002+++ b/arch/x86/boot/cpucheck.c
13003@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13004 u32 ecx = MSR_K7_HWCR;
13005 u32 eax, edx;
13006
13007- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13008+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13009 eax &= ~(1 << 15);
13010- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13011+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13012
13013 get_cpuflags(); /* Make sure it really did something */
13014 err = check_cpuflags();
13015@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13016 u32 ecx = MSR_VIA_FCR;
13017 u32 eax, edx;
13018
13019- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13020+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13021 eax |= (1<<1)|(1<<7);
13022- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13023+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13024
13025 set_bit(X86_FEATURE_CX8, cpu.flags);
13026 err = check_cpuflags();
13027@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13028 u32 eax, edx;
13029 u32 level = 1;
13030
13031- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13032- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13033- asm("cpuid"
13034+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13035+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13036+ asm volatile("cpuid"
13037 : "+a" (level), "=d" (cpu.flags[0])
13038 : : "ecx", "ebx");
13039- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13040+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13041
13042 err = check_cpuflags();
13043 } else if (err == 0x01 &&
13044diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13045index 16ef025..91e033b 100644
13046--- a/arch/x86/boot/header.S
13047+++ b/arch/x86/boot/header.S
13048@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13049 # single linked list of
13050 # struct setup_data
13051
13052-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13053+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13054
13055 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13057+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13058+#else
13059 #define VO_INIT_SIZE (VO__end - VO__text)
13060+#endif
13061 #if ZO_INIT_SIZE > VO_INIT_SIZE
13062 #define INIT_SIZE ZO_INIT_SIZE
13063 #else
13064diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13065index db75d07..8e6d0af 100644
13066--- a/arch/x86/boot/memory.c
13067+++ b/arch/x86/boot/memory.c
13068@@ -19,7 +19,7 @@
13069
13070 static int detect_memory_e820(void)
13071 {
13072- int count = 0;
13073+ unsigned int count = 0;
13074 struct biosregs ireg, oreg;
13075 struct e820entry *desc = boot_params.e820_map;
13076 static struct e820entry buf; /* static so it is zeroed */
13077diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13078index ba3e100..6501b8f 100644
13079--- a/arch/x86/boot/video-vesa.c
13080+++ b/arch/x86/boot/video-vesa.c
13081@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13082
13083 boot_params.screen_info.vesapm_seg = oreg.es;
13084 boot_params.screen_info.vesapm_off = oreg.di;
13085+ boot_params.screen_info.vesapm_size = oreg.cx;
13086 }
13087
13088 /*
13089diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13090index 43eda28..5ab5fdb 100644
13091--- a/arch/x86/boot/video.c
13092+++ b/arch/x86/boot/video.c
13093@@ -96,7 +96,7 @@ static void store_mode_params(void)
13094 static unsigned int get_entry(void)
13095 {
13096 char entry_buf[4];
13097- int i, len = 0;
13098+ unsigned int i, len = 0;
13099 int key;
13100 unsigned int v;
13101
13102diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13103index 9105655..41779c1 100644
13104--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13105+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13106@@ -8,6 +8,8 @@
13107 * including this sentence is retained in full.
13108 */
13109
13110+#include <asm/alternative-asm.h>
13111+
13112 .extern crypto_ft_tab
13113 .extern crypto_it_tab
13114 .extern crypto_fl_tab
13115@@ -70,6 +72,8 @@
13116 je B192; \
13117 leaq 32(r9),r9;
13118
13119+#define ret pax_force_retaddr; ret
13120+
13121 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13122 movq r1,r2; \
13123 movq r3,r4; \
13124diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13125index 477e9d7..c92c7d8 100644
13126--- a/arch/x86/crypto/aesni-intel_asm.S
13127+++ b/arch/x86/crypto/aesni-intel_asm.S
13128@@ -31,6 +31,7 @@
13129
13130 #include <linux/linkage.h>
13131 #include <asm/inst.h>
13132+#include <asm/alternative-asm.h>
13133
13134 #ifdef __x86_64__
13135 .data
13136@@ -205,7 +206,7 @@ enc: .octa 0x2
13137 * num_initial_blocks = b mod 4
13138 * encrypt the initial num_initial_blocks blocks and apply ghash on
13139 * the ciphertext
13140-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13141+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13142 * are clobbered
13143 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13144 */
13145@@ -214,8 +215,8 @@ enc: .octa 0x2
13146 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13147 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13148 mov arg7, %r10 # %r10 = AAD
13149- mov arg8, %r12 # %r12 = aadLen
13150- mov %r12, %r11
13151+ mov arg8, %r15 # %r15 = aadLen
13152+ mov %r15, %r11
13153 pxor %xmm\i, %xmm\i
13154 _get_AAD_loop\num_initial_blocks\operation:
13155 movd (%r10), \TMP1
13156@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13157 psrldq $4, %xmm\i
13158 pxor \TMP1, %xmm\i
13159 add $4, %r10
13160- sub $4, %r12
13161+ sub $4, %r15
13162 jne _get_AAD_loop\num_initial_blocks\operation
13163 cmp $16, %r11
13164 je _get_AAD_loop2_done\num_initial_blocks\operation
13165- mov $16, %r12
13166+ mov $16, %r15
13167 _get_AAD_loop2\num_initial_blocks\operation:
13168 psrldq $4, %xmm\i
13169- sub $4, %r12
13170- cmp %r11, %r12
13171+ sub $4, %r15
13172+ cmp %r11, %r15
13173 jne _get_AAD_loop2\num_initial_blocks\operation
13174 _get_AAD_loop2_done\num_initial_blocks\operation:
13175 movdqa SHUF_MASK(%rip), %xmm14
13176@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13177 * num_initial_blocks = b mod 4
13178 * encrypt the initial num_initial_blocks blocks and apply ghash on
13179 * the ciphertext
13180-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13181+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13182 * are clobbered
13183 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13184 */
13185@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13186 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13187 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13188 mov arg7, %r10 # %r10 = AAD
13189- mov arg8, %r12 # %r12 = aadLen
13190- mov %r12, %r11
13191+ mov arg8, %r15 # %r15 = aadLen
13192+ mov %r15, %r11
13193 pxor %xmm\i, %xmm\i
13194 _get_AAD_loop\num_initial_blocks\operation:
13195 movd (%r10), \TMP1
13196@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13197 psrldq $4, %xmm\i
13198 pxor \TMP1, %xmm\i
13199 add $4, %r10
13200- sub $4, %r12
13201+ sub $4, %r15
13202 jne _get_AAD_loop\num_initial_blocks\operation
13203 cmp $16, %r11
13204 je _get_AAD_loop2_done\num_initial_blocks\operation
13205- mov $16, %r12
13206+ mov $16, %r15
13207 _get_AAD_loop2\num_initial_blocks\operation:
13208 psrldq $4, %xmm\i
13209- sub $4, %r12
13210- cmp %r11, %r12
13211+ sub $4, %r15
13212+ cmp %r11, %r15
13213 jne _get_AAD_loop2\num_initial_blocks\operation
13214 _get_AAD_loop2_done\num_initial_blocks\operation:
13215 movdqa SHUF_MASK(%rip), %xmm14
13216@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13217 *
13218 *****************************************************************************/
13219 ENTRY(aesni_gcm_dec)
13220- push %r12
13221+ push %r15
13222 push %r13
13223 push %r14
13224 mov %rsp, %r14
13225@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13226 */
13227 sub $VARIABLE_OFFSET, %rsp
13228 and $~63, %rsp # align rsp to 64 bytes
13229- mov %arg6, %r12
13230- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13231+ mov %arg6, %r15
13232+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13233 movdqa SHUF_MASK(%rip), %xmm2
13234 PSHUFB_XMM %xmm2, %xmm13
13235
13236@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13237 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13238 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13239 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13240- mov %r13, %r12
13241- and $(3<<4), %r12
13242+ mov %r13, %r15
13243+ and $(3<<4), %r15
13244 jz _initial_num_blocks_is_0_decrypt
13245- cmp $(2<<4), %r12
13246+ cmp $(2<<4), %r15
13247 jb _initial_num_blocks_is_1_decrypt
13248 je _initial_num_blocks_is_2_decrypt
13249 _initial_num_blocks_is_3_decrypt:
13250@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13251 sub $16, %r11
13252 add %r13, %r11
13253 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13254- lea SHIFT_MASK+16(%rip), %r12
13255- sub %r13, %r12
13256+ lea SHIFT_MASK+16(%rip), %r15
13257+ sub %r13, %r15
13258 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13259 # (%r13 is the number of bytes in plaintext mod 16)
13260- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13261+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13262 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13263
13264 movdqa %xmm1, %xmm2
13265 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13266- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13267+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13268 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13269 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13270 pand %xmm1, %xmm2
13271@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13272 sub $1, %r13
13273 jne _less_than_8_bytes_left_decrypt
13274 _multiple_of_16_bytes_decrypt:
13275- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13276- shl $3, %r12 # convert into number of bits
13277- movd %r12d, %xmm15 # len(A) in %xmm15
13278+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13279+ shl $3, %r15 # convert into number of bits
13280+ movd %r15d, %xmm15 # len(A) in %xmm15
13281 shl $3, %arg4 # len(C) in bits (*128)
13282 MOVQ_R64_XMM %arg4, %xmm1
13283 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13284@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13285 mov %r14, %rsp
13286 pop %r14
13287 pop %r13
13288- pop %r12
13289+ pop %r15
13290+ pax_force_retaddr
13291 ret
13292 ENDPROC(aesni_gcm_dec)
13293
13294@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13295 * poly = x^128 + x^127 + x^126 + x^121 + 1
13296 ***************************************************************************/
13297 ENTRY(aesni_gcm_enc)
13298- push %r12
13299+ push %r15
13300 push %r13
13301 push %r14
13302 mov %rsp, %r14
13303@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13304 #
13305 sub $VARIABLE_OFFSET, %rsp
13306 and $~63, %rsp
13307- mov %arg6, %r12
13308- movdqu (%r12), %xmm13
13309+ mov %arg6, %r15
13310+ movdqu (%r15), %xmm13
13311 movdqa SHUF_MASK(%rip), %xmm2
13312 PSHUFB_XMM %xmm2, %xmm13
13313
13314@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13315 movdqa %xmm13, HashKey(%rsp)
13316 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13317 and $-16, %r13
13318- mov %r13, %r12
13319+ mov %r13, %r15
13320
13321 # Encrypt first few blocks
13322
13323- and $(3<<4), %r12
13324+ and $(3<<4), %r15
13325 jz _initial_num_blocks_is_0_encrypt
13326- cmp $(2<<4), %r12
13327+ cmp $(2<<4), %r15
13328 jb _initial_num_blocks_is_1_encrypt
13329 je _initial_num_blocks_is_2_encrypt
13330 _initial_num_blocks_is_3_encrypt:
13331@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13332 sub $16, %r11
13333 add %r13, %r11
13334 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13335- lea SHIFT_MASK+16(%rip), %r12
13336- sub %r13, %r12
13337+ lea SHIFT_MASK+16(%rip), %r15
13338+ sub %r13, %r15
13339 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13340 # (%r13 is the number of bytes in plaintext mod 16)
13341- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13342+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13343 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13344 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13345- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13346+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13347 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13348 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13349 movdqa SHUF_MASK(%rip), %xmm10
13350@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13351 sub $1, %r13
13352 jne _less_than_8_bytes_left_encrypt
13353 _multiple_of_16_bytes_encrypt:
13354- mov arg8, %r12 # %r12 = addLen (number of bytes)
13355- shl $3, %r12
13356- movd %r12d, %xmm15 # len(A) in %xmm15
13357+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13358+ shl $3, %r15
13359+ movd %r15d, %xmm15 # len(A) in %xmm15
13360 shl $3, %arg4 # len(C) in bits (*128)
13361 MOVQ_R64_XMM %arg4, %xmm1
13362 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13363@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13364 mov %r14, %rsp
13365 pop %r14
13366 pop %r13
13367- pop %r12
13368+ pop %r15
13369+ pax_force_retaddr
13370 ret
13371 ENDPROC(aesni_gcm_enc)
13372
13373@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13374 pxor %xmm1, %xmm0
13375 movaps %xmm0, (TKEYP)
13376 add $0x10, TKEYP
13377+ pax_force_retaddr
13378 ret
13379 ENDPROC(_key_expansion_128)
13380 ENDPROC(_key_expansion_256a)
13381@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13382 shufps $0b01001110, %xmm2, %xmm1
13383 movaps %xmm1, 0x10(TKEYP)
13384 add $0x20, TKEYP
13385+ pax_force_retaddr
13386 ret
13387 ENDPROC(_key_expansion_192a)
13388
13389@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13390
13391 movaps %xmm0, (TKEYP)
13392 add $0x10, TKEYP
13393+ pax_force_retaddr
13394 ret
13395 ENDPROC(_key_expansion_192b)
13396
13397@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13398 pxor %xmm1, %xmm2
13399 movaps %xmm2, (TKEYP)
13400 add $0x10, TKEYP
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(_key_expansion_256b)
13404
13405@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13406 #ifndef __x86_64__
13407 popl KEYP
13408 #endif
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(aesni_set_key)
13412
13413@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13414 popl KLEN
13415 popl KEYP
13416 #endif
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(aesni_enc)
13420
13421@@ -1974,6 +1983,7 @@ _aesni_enc1:
13422 AESENC KEY STATE
13423 movaps 0x70(TKEYP), KEY
13424 AESENCLAST KEY STATE
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_aesni_enc1)
13428
13429@@ -2083,6 +2093,7 @@ _aesni_enc4:
13430 AESENCLAST KEY STATE2
13431 AESENCLAST KEY STATE3
13432 AESENCLAST KEY STATE4
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_aesni_enc4)
13436
13437@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13438 popl KLEN
13439 popl KEYP
13440 #endif
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_dec)
13444
13445@@ -2164,6 +2176,7 @@ _aesni_dec1:
13446 AESDEC KEY STATE
13447 movaps 0x70(TKEYP), KEY
13448 AESDECLAST KEY STATE
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(_aesni_dec1)
13452
13453@@ -2273,6 +2286,7 @@ _aesni_dec4:
13454 AESDECLAST KEY STATE2
13455 AESDECLAST KEY STATE3
13456 AESDECLAST KEY STATE4
13457+ pax_force_retaddr
13458 ret
13459 ENDPROC(_aesni_dec4)
13460
13461@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13462 popl KEYP
13463 popl LEN
13464 #endif
13465+ pax_force_retaddr
13466 ret
13467 ENDPROC(aesni_ecb_enc)
13468
13469@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13470 popl KEYP
13471 popl LEN
13472 #endif
13473+ pax_force_retaddr
13474 ret
13475 ENDPROC(aesni_ecb_dec)
13476
13477@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13478 popl LEN
13479 popl IVP
13480 #endif
13481+ pax_force_retaddr
13482 ret
13483 ENDPROC(aesni_cbc_enc)
13484
13485@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13486 popl LEN
13487 popl IVP
13488 #endif
13489+ pax_force_retaddr
13490 ret
13491 ENDPROC(aesni_cbc_dec)
13492
13493@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13494 mov $1, TCTR_LOW
13495 MOVQ_R64_XMM TCTR_LOW INC
13496 MOVQ_R64_XMM CTR TCTR_LOW
13497+ pax_force_retaddr
13498 ret
13499 ENDPROC(_aesni_inc_init)
13500
13501@@ -2579,6 +2598,7 @@ _aesni_inc:
13502 .Linc_low:
13503 movaps CTR, IV
13504 PSHUFB_XMM BSWAP_MASK IV
13505+ pax_force_retaddr
13506 ret
13507 ENDPROC(_aesni_inc)
13508
13509@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13510 .Lctr_enc_ret:
13511 movups IV, (IVP)
13512 .Lctr_enc_just_ret:
13513+ pax_force_retaddr
13514 ret
13515 ENDPROC(aesni_ctr_enc)
13516
13517@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13518 pxor INC, STATE4
13519 movdqu STATE4, 0x70(OUTP)
13520
13521+ pax_force_retaddr
13522 ret
13523 ENDPROC(aesni_xts_crypt8)
13524
13525diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13526index 246c670..466e2d6 100644
13527--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13528+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13529@@ -21,6 +21,7 @@
13530 */
13531
13532 #include <linux/linkage.h>
13533+#include <asm/alternative-asm.h>
13534
13535 .file "blowfish-x86_64-asm.S"
13536 .text
13537@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13538 jnz .L__enc_xor;
13539
13540 write_block();
13541+ pax_force_retaddr
13542 ret;
13543 .L__enc_xor:
13544 xor_block();
13545+ pax_force_retaddr
13546 ret;
13547 ENDPROC(__blowfish_enc_blk)
13548
13549@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13550
13551 movq %r11, %rbp;
13552
13553+ pax_force_retaddr
13554 ret;
13555 ENDPROC(blowfish_dec_blk)
13556
13557@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13558
13559 popq %rbx;
13560 popq %rbp;
13561+ pax_force_retaddr
13562 ret;
13563
13564 .L__enc_xor4:
13565@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13566
13567 popq %rbx;
13568 popq %rbp;
13569+ pax_force_retaddr
13570 ret;
13571 ENDPROC(__blowfish_enc_blk_4way)
13572
13573@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13574 popq %rbx;
13575 popq %rbp;
13576
13577+ pax_force_retaddr
13578 ret;
13579 ENDPROC(blowfish_dec_blk_4way)
13580diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13581index ce71f92..1dce7ec 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13584@@ -16,6 +16,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13594 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13602 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13609 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13610 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13617 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13618 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13625 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13626 %xmm8, %rsi);
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_16way)
13631
13632@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13633 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13634 %xmm8, %rsi);
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_16way)
13639
13640@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13641 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13642 %xmm8, %rsi);
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_16way)
13647
13648@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13649 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13650 %xmm8, %rsi);
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_16way)
13655
13656@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13657 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13658 %xmm8, %rsi);
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_16way)
13663
13664diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13665index 0e0b886..5a3123c 100644
13666--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13667+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13668@@ -11,6 +11,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 #define CAMELLIA_TABLE_BYTE_LEN 272
13675
13676@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13677 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13678 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13679 %rcx, (%r9));
13680+ pax_force_retaddr
13681 ret;
13682 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13683
13684@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13685 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13686 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13687 %rax, (%r9));
13688+ pax_force_retaddr
13689 ret;
13690 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13691
13692@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13693 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13694 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13695
13696+ pax_force_retaddr
13697 ret;
13698
13699 .align 8
13700@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13701 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13702 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13703
13704+ pax_force_retaddr
13705 ret;
13706
13707 .align 8
13708@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13709
13710 vzeroupper;
13711
13712+ pax_force_retaddr
13713 ret;
13714 ENDPROC(camellia_ecb_enc_32way)
13715
13716@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13717
13718 vzeroupper;
13719
13720+ pax_force_retaddr
13721 ret;
13722 ENDPROC(camellia_ecb_dec_32way)
13723
13724@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13725
13726 vzeroupper;
13727
13728+ pax_force_retaddr
13729 ret;
13730 ENDPROC(camellia_cbc_dec_32way)
13731
13732@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13733
13734 vzeroupper;
13735
13736+ pax_force_retaddr
13737 ret;
13738 ENDPROC(camellia_ctr_32way)
13739
13740@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13741
13742 vzeroupper;
13743
13744+ pax_force_retaddr
13745 ret;
13746 ENDPROC(camellia_xts_crypt_32way)
13747
13748diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13749index 310319c..db3d7b5 100644
13750--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13751+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13752@@ -21,6 +21,7 @@
13753 */
13754
13755 #include <linux/linkage.h>
13756+#include <asm/alternative-asm.h>
13757
13758 .file "camellia-x86_64-asm_64.S"
13759 .text
13760@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13761 enc_outunpack(mov, RT1);
13762
13763 movq RRBP, %rbp;
13764+ pax_force_retaddr
13765 ret;
13766
13767 .L__enc_xor:
13768 enc_outunpack(xor, RT1);
13769
13770 movq RRBP, %rbp;
13771+ pax_force_retaddr
13772 ret;
13773 ENDPROC(__camellia_enc_blk)
13774
13775@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13776 dec_outunpack();
13777
13778 movq RRBP, %rbp;
13779+ pax_force_retaddr
13780 ret;
13781 ENDPROC(camellia_dec_blk)
13782
13783@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13784
13785 movq RRBP, %rbp;
13786 popq %rbx;
13787+ pax_force_retaddr
13788 ret;
13789
13790 .L__enc2_xor:
13791@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13792
13793 movq RRBP, %rbp;
13794 popq %rbx;
13795+ pax_force_retaddr
13796 ret;
13797 ENDPROC(__camellia_enc_blk_2way)
13798
13799@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13800
13801 movq RRBP, %rbp;
13802 movq RXOR, %rbx;
13803+ pax_force_retaddr
13804 ret;
13805 ENDPROC(camellia_dec_blk_2way)
13806diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13807index c35fd5d..2d8c7db 100644
13808--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13809+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13810@@ -24,6 +24,7 @@
13811 */
13812
13813 #include <linux/linkage.h>
13814+#include <asm/alternative-asm.h>
13815
13816 .file "cast5-avx-x86_64-asm_64.S"
13817
13818@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13819 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13820 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13821
13822+ pax_force_retaddr
13823 ret;
13824 ENDPROC(__cast5_enc_blk16)
13825
13826@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13827 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13828 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13829
13830+ pax_force_retaddr
13831 ret;
13832
13833 .L__skip_dec:
13834@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13835 vmovdqu RR4, (6*4*4)(%r11);
13836 vmovdqu RL4, (7*4*4)(%r11);
13837
13838+ pax_force_retaddr
13839 ret;
13840 ENDPROC(cast5_ecb_enc_16way)
13841
13842@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13843 vmovdqu RR4, (6*4*4)(%r11);
13844 vmovdqu RL4, (7*4*4)(%r11);
13845
13846+ pax_force_retaddr
13847 ret;
13848 ENDPROC(cast5_ecb_dec_16way)
13849
13850@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13851 * %rdx: src
13852 */
13853
13854- pushq %r12;
13855+ pushq %r14;
13856
13857 movq %rsi, %r11;
13858- movq %rdx, %r12;
13859+ movq %rdx, %r14;
13860
13861 vmovdqu (0*16)(%rdx), RL1;
13862 vmovdqu (1*16)(%rdx), RR1;
13863@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13864 call __cast5_dec_blk16;
13865
13866 /* xor with src */
13867- vmovq (%r12), RX;
13868+ vmovq (%r14), RX;
13869 vpshufd $0x4f, RX, RX;
13870 vpxor RX, RR1, RR1;
13871- vpxor 0*16+8(%r12), RL1, RL1;
13872- vpxor 1*16+8(%r12), RR2, RR2;
13873- vpxor 2*16+8(%r12), RL2, RL2;
13874- vpxor 3*16+8(%r12), RR3, RR3;
13875- vpxor 4*16+8(%r12), RL3, RL3;
13876- vpxor 5*16+8(%r12), RR4, RR4;
13877- vpxor 6*16+8(%r12), RL4, RL4;
13878+ vpxor 0*16+8(%r14), RL1, RL1;
13879+ vpxor 1*16+8(%r14), RR2, RR2;
13880+ vpxor 2*16+8(%r14), RL2, RL2;
13881+ vpxor 3*16+8(%r14), RR3, RR3;
13882+ vpxor 4*16+8(%r14), RL3, RL3;
13883+ vpxor 5*16+8(%r14), RR4, RR4;
13884+ vpxor 6*16+8(%r14), RL4, RL4;
13885
13886 vmovdqu RR1, (0*16)(%r11);
13887 vmovdqu RL1, (1*16)(%r11);
13888@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13889 vmovdqu RR4, (6*16)(%r11);
13890 vmovdqu RL4, (7*16)(%r11);
13891
13892- popq %r12;
13893+ popq %r14;
13894
13895+ pax_force_retaddr
13896 ret;
13897 ENDPROC(cast5_cbc_dec_16way)
13898
13899@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13900 * %rcx: iv (big endian, 64bit)
13901 */
13902
13903- pushq %r12;
13904+ pushq %r14;
13905
13906 movq %rsi, %r11;
13907- movq %rdx, %r12;
13908+ movq %rdx, %r14;
13909
13910 vpcmpeqd RTMP, RTMP, RTMP;
13911 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13912@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13913 call __cast5_enc_blk16;
13914
13915 /* dst = src ^ iv */
13916- vpxor (0*16)(%r12), RR1, RR1;
13917- vpxor (1*16)(%r12), RL1, RL1;
13918- vpxor (2*16)(%r12), RR2, RR2;
13919- vpxor (3*16)(%r12), RL2, RL2;
13920- vpxor (4*16)(%r12), RR3, RR3;
13921- vpxor (5*16)(%r12), RL3, RL3;
13922- vpxor (6*16)(%r12), RR4, RR4;
13923- vpxor (7*16)(%r12), RL4, RL4;
13924+ vpxor (0*16)(%r14), RR1, RR1;
13925+ vpxor (1*16)(%r14), RL1, RL1;
13926+ vpxor (2*16)(%r14), RR2, RR2;
13927+ vpxor (3*16)(%r14), RL2, RL2;
13928+ vpxor (4*16)(%r14), RR3, RR3;
13929+ vpxor (5*16)(%r14), RL3, RL3;
13930+ vpxor (6*16)(%r14), RR4, RR4;
13931+ vpxor (7*16)(%r14), RL4, RL4;
13932 vmovdqu RR1, (0*16)(%r11);
13933 vmovdqu RL1, (1*16)(%r11);
13934 vmovdqu RR2, (2*16)(%r11);
13935@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13936 vmovdqu RR4, (6*16)(%r11);
13937 vmovdqu RL4, (7*16)(%r11);
13938
13939- popq %r12;
13940+ popq %r14;
13941
13942+ pax_force_retaddr
13943 ret;
13944 ENDPROC(cast5_ctr_16way)
13945diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13946index e3531f8..e123f35 100644
13947--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13948+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13949@@ -24,6 +24,7 @@
13950 */
13951
13952 #include <linux/linkage.h>
13953+#include <asm/alternative-asm.h>
13954 #include "glue_helper-asm-avx.S"
13955
13956 .file "cast6-avx-x86_64-asm_64.S"
13957@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13958 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13959 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13960
13961+ pax_force_retaddr
13962 ret;
13963 ENDPROC(__cast6_enc_blk8)
13964
13965@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13966 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13967 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13968
13969+ pax_force_retaddr
13970 ret;
13971 ENDPROC(__cast6_dec_blk8)
13972
13973@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13974
13975 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13976
13977+ pax_force_retaddr
13978 ret;
13979 ENDPROC(cast6_ecb_enc_8way)
13980
13981@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13982
13983 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13984
13985+ pax_force_retaddr
13986 ret;
13987 ENDPROC(cast6_ecb_dec_8way)
13988
13989@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13990 * %rdx: src
13991 */
13992
13993- pushq %r12;
13994+ pushq %r14;
13995
13996 movq %rsi, %r11;
13997- movq %rdx, %r12;
13998+ movq %rdx, %r14;
13999
14000 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14001
14002 call __cast6_dec_blk8;
14003
14004- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14005+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14006
14007- popq %r12;
14008+ popq %r14;
14009
14010+ pax_force_retaddr
14011 ret;
14012 ENDPROC(cast6_cbc_dec_8way)
14013
14014@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14015 * %rcx: iv (little endian, 128bit)
14016 */
14017
14018- pushq %r12;
14019+ pushq %r14;
14020
14021 movq %rsi, %r11;
14022- movq %rdx, %r12;
14023+ movq %rdx, %r14;
14024
14025 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14026 RD2, RX, RKR, RKM);
14027
14028 call __cast6_enc_blk8;
14029
14030- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14031+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14032
14033- popq %r12;
14034+ popq %r14;
14035
14036+ pax_force_retaddr
14037 ret;
14038 ENDPROC(cast6_ctr_8way)
14039
14040@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14041 /* dst <= regs xor IVs(in dst) */
14042 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14043
14044+ pax_force_retaddr
14045 ret;
14046 ENDPROC(cast6_xts_enc_8way)
14047
14048@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14049 /* dst <= regs xor IVs(in dst) */
14050 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14051
14052+ pax_force_retaddr
14053 ret;
14054 ENDPROC(cast6_xts_dec_8way)
14055diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14056index 26d49eb..c0a8c84 100644
14057--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14058+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14059@@ -45,6 +45,7 @@
14060
14061 #include <asm/inst.h>
14062 #include <linux/linkage.h>
14063+#include <asm/alternative-asm.h>
14064
14065 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14066
14067@@ -309,6 +310,7 @@ do_return:
14068 popq %rsi
14069 popq %rdi
14070 popq %rbx
14071+ pax_force_retaddr
14072 ret
14073
14074 ################################################################
14075diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14076index 5d1e007..098cb4f 100644
14077--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14078+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14079@@ -18,6 +18,7 @@
14080
14081 #include <linux/linkage.h>
14082 #include <asm/inst.h>
14083+#include <asm/alternative-asm.h>
14084
14085 .data
14086
14087@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14088 psrlq $1, T2
14089 pxor T2, T1
14090 pxor T1, DATA
14091+ pax_force_retaddr
14092 ret
14093 ENDPROC(__clmul_gf128mul_ble)
14094
14095@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14096 call __clmul_gf128mul_ble
14097 PSHUFB_XMM BSWAP DATA
14098 movups DATA, (%rdi)
14099+ pax_force_retaddr
14100 ret
14101 ENDPROC(clmul_ghash_mul)
14102
14103@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14104 PSHUFB_XMM BSWAP DATA
14105 movups DATA, (%rdi)
14106 .Lupdate_just_ret:
14107+ pax_force_retaddr
14108 ret
14109 ENDPROC(clmul_ghash_update)
14110diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14111index 9279e0b..c4b3d2c 100644
14112--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14113+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14114@@ -1,4 +1,5 @@
14115 #include <linux/linkage.h>
14116+#include <asm/alternative-asm.h>
14117
14118 # enter salsa20_encrypt_bytes
14119 ENTRY(salsa20_encrypt_bytes)
14120@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14121 add %r11,%rsp
14122 mov %rdi,%rax
14123 mov %rsi,%rdx
14124+ pax_force_retaddr
14125 ret
14126 # bytesatleast65:
14127 ._bytesatleast65:
14128@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14129 add %r11,%rsp
14130 mov %rdi,%rax
14131 mov %rsi,%rdx
14132+ pax_force_retaddr
14133 ret
14134 ENDPROC(salsa20_keysetup)
14135
14136@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14137 add %r11,%rsp
14138 mov %rdi,%rax
14139 mov %rsi,%rdx
14140+ pax_force_retaddr
14141 ret
14142 ENDPROC(salsa20_ivsetup)
14143diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14144index 2f202f4..d9164d6 100644
14145--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14146+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14147@@ -24,6 +24,7 @@
14148 */
14149
14150 #include <linux/linkage.h>
14151+#include <asm/alternative-asm.h>
14152 #include "glue_helper-asm-avx.S"
14153
14154 .file "serpent-avx-x86_64-asm_64.S"
14155@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14156 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14157 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14158
14159+ pax_force_retaddr
14160 ret;
14161 ENDPROC(__serpent_enc_blk8_avx)
14162
14163@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14164 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14165 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14166
14167+ pax_force_retaddr
14168 ret;
14169 ENDPROC(__serpent_dec_blk8_avx)
14170
14171@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14172
14173 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14174
14175+ pax_force_retaddr
14176 ret;
14177 ENDPROC(serpent_ecb_enc_8way_avx)
14178
14179@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14180
14181 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14182
14183+ pax_force_retaddr
14184 ret;
14185 ENDPROC(serpent_ecb_dec_8way_avx)
14186
14187@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14188
14189 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14190
14191+ pax_force_retaddr
14192 ret;
14193 ENDPROC(serpent_cbc_dec_8way_avx)
14194
14195@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14196
14197 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14198
14199+ pax_force_retaddr
14200 ret;
14201 ENDPROC(serpent_ctr_8way_avx)
14202
14203@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14204 /* dst <= regs xor IVs(in dst) */
14205 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14206
14207+ pax_force_retaddr
14208 ret;
14209 ENDPROC(serpent_xts_enc_8way_avx)
14210
14211@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14212 /* dst <= regs xor IVs(in dst) */
14213 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14214
14215+ pax_force_retaddr
14216 ret;
14217 ENDPROC(serpent_xts_dec_8way_avx)
14218diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14219index b222085..abd483c 100644
14220--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14221+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14222@@ -15,6 +15,7 @@
14223 */
14224
14225 #include <linux/linkage.h>
14226+#include <asm/alternative-asm.h>
14227 #include "glue_helper-asm-avx2.S"
14228
14229 .file "serpent-avx2-asm_64.S"
14230@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14231 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14232 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14233
14234+ pax_force_retaddr
14235 ret;
14236 ENDPROC(__serpent_enc_blk16)
14237
14238@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14239 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14240 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14241
14242+ pax_force_retaddr
14243 ret;
14244 ENDPROC(__serpent_dec_blk16)
14245
14246@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14247
14248 vzeroupper;
14249
14250+ pax_force_retaddr
14251 ret;
14252 ENDPROC(serpent_ecb_enc_16way)
14253
14254@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14255
14256 vzeroupper;
14257
14258+ pax_force_retaddr
14259 ret;
14260 ENDPROC(serpent_ecb_dec_16way)
14261
14262@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14263
14264 vzeroupper;
14265
14266+ pax_force_retaddr
14267 ret;
14268 ENDPROC(serpent_cbc_dec_16way)
14269
14270@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14271
14272 vzeroupper;
14273
14274+ pax_force_retaddr
14275 ret;
14276 ENDPROC(serpent_ctr_16way)
14277
14278@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14279
14280 vzeroupper;
14281
14282+ pax_force_retaddr
14283 ret;
14284 ENDPROC(serpent_xts_enc_16way)
14285
14286@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14287
14288 vzeroupper;
14289
14290+ pax_force_retaddr
14291 ret;
14292 ENDPROC(serpent_xts_dec_16way)
14293diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14294index acc066c..1559cc4 100644
14295--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14296+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14297@@ -25,6 +25,7 @@
14298 */
14299
14300 #include <linux/linkage.h>
14301+#include <asm/alternative-asm.h>
14302
14303 .file "serpent-sse2-x86_64-asm_64.S"
14304 .text
14305@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14306 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14307 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14308
14309+ pax_force_retaddr
14310 ret;
14311
14312 .L__enc_xor8:
14313 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14314 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14315
14316+ pax_force_retaddr
14317 ret;
14318 ENDPROC(__serpent_enc_blk_8way)
14319
14320@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14321 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14322 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14323
14324+ pax_force_retaddr
14325 ret;
14326 ENDPROC(serpent_dec_blk_8way)
14327diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14328index a410950..9dfe7ad 100644
14329--- a/arch/x86/crypto/sha1_ssse3_asm.S
14330+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14331@@ -29,6 +29,7 @@
14332 */
14333
14334 #include <linux/linkage.h>
14335+#include <asm/alternative-asm.h>
14336
14337 #define CTX %rdi // arg1
14338 #define BUF %rsi // arg2
14339@@ -75,9 +76,9 @@
14340
14341 push %rbx
14342 push %rbp
14343- push %r12
14344+ push %r14
14345
14346- mov %rsp, %r12
14347+ mov %rsp, %r14
14348 sub $64, %rsp # allocate workspace
14349 and $~15, %rsp # align stack
14350
14351@@ -99,11 +100,12 @@
14352 xor %rax, %rax
14353 rep stosq
14354
14355- mov %r12, %rsp # deallocate workspace
14356+ mov %r14, %rsp # deallocate workspace
14357
14358- pop %r12
14359+ pop %r14
14360 pop %rbp
14361 pop %rbx
14362+ pax_force_retaddr
14363 ret
14364
14365 ENDPROC(\name)
14366diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14367index 642f156..51a513c 100644
14368--- a/arch/x86/crypto/sha256-avx-asm.S
14369+++ b/arch/x86/crypto/sha256-avx-asm.S
14370@@ -49,6 +49,7 @@
14371
14372 #ifdef CONFIG_AS_AVX
14373 #include <linux/linkage.h>
14374+#include <asm/alternative-asm.h>
14375
14376 ## assume buffers not aligned
14377 #define VMOVDQ vmovdqu
14378@@ -460,6 +461,7 @@ done_hash:
14379 popq %r13
14380 popq %rbp
14381 popq %rbx
14382+ pax_force_retaddr
14383 ret
14384 ENDPROC(sha256_transform_avx)
14385
14386diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14387index 9e86944..3795e6a 100644
14388--- a/arch/x86/crypto/sha256-avx2-asm.S
14389+++ b/arch/x86/crypto/sha256-avx2-asm.S
14390@@ -50,6 +50,7 @@
14391
14392 #ifdef CONFIG_AS_AVX2
14393 #include <linux/linkage.h>
14394+#include <asm/alternative-asm.h>
14395
14396 ## assume buffers not aligned
14397 #define VMOVDQ vmovdqu
14398@@ -720,6 +721,7 @@ done_hash:
14399 popq %r12
14400 popq %rbp
14401 popq %rbx
14402+ pax_force_retaddr
14403 ret
14404 ENDPROC(sha256_transform_rorx)
14405
14406diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14407index f833b74..8c62a9e 100644
14408--- a/arch/x86/crypto/sha256-ssse3-asm.S
14409+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14410@@ -47,6 +47,7 @@
14411 ########################################################################
14412
14413 #include <linux/linkage.h>
14414+#include <asm/alternative-asm.h>
14415
14416 ## assume buffers not aligned
14417 #define MOVDQ movdqu
14418@@ -471,6 +472,7 @@ done_hash:
14419 popq %rbp
14420 popq %rbx
14421
14422+ pax_force_retaddr
14423 ret
14424 ENDPROC(sha256_transform_ssse3)
14425
14426diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14427index 974dde9..a823ff9 100644
14428--- a/arch/x86/crypto/sha512-avx-asm.S
14429+++ b/arch/x86/crypto/sha512-avx-asm.S
14430@@ -49,6 +49,7 @@
14431
14432 #ifdef CONFIG_AS_AVX
14433 #include <linux/linkage.h>
14434+#include <asm/alternative-asm.h>
14435
14436 .text
14437
14438@@ -364,6 +365,7 @@ updateblock:
14439 mov frame_RSPSAVE(%rsp), %rsp
14440
14441 nowork:
14442+ pax_force_retaddr
14443 ret
14444 ENDPROC(sha512_transform_avx)
14445
14446diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14447index 568b961..ed20c37 100644
14448--- a/arch/x86/crypto/sha512-avx2-asm.S
14449+++ b/arch/x86/crypto/sha512-avx2-asm.S
14450@@ -51,6 +51,7 @@
14451
14452 #ifdef CONFIG_AS_AVX2
14453 #include <linux/linkage.h>
14454+#include <asm/alternative-asm.h>
14455
14456 .text
14457
14458@@ -678,6 +679,7 @@ done_hash:
14459
14460 # Restore Stack Pointer
14461 mov frame_RSPSAVE(%rsp), %rsp
14462+ pax_force_retaddr
14463 ret
14464 ENDPROC(sha512_transform_rorx)
14465
14466diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14467index fb56855..6edd768 100644
14468--- a/arch/x86/crypto/sha512-ssse3-asm.S
14469+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14470@@ -48,6 +48,7 @@
14471 ########################################################################
14472
14473 #include <linux/linkage.h>
14474+#include <asm/alternative-asm.h>
14475
14476 .text
14477
14478@@ -363,6 +364,7 @@ updateblock:
14479 mov frame_RSPSAVE(%rsp), %rsp
14480
14481 nowork:
14482+ pax_force_retaddr
14483 ret
14484 ENDPROC(sha512_transform_ssse3)
14485
14486diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14487index 0505813..b067311 100644
14488--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14489+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14490@@ -24,6 +24,7 @@
14491 */
14492
14493 #include <linux/linkage.h>
14494+#include <asm/alternative-asm.h>
14495 #include "glue_helper-asm-avx.S"
14496
14497 .file "twofish-avx-x86_64-asm_64.S"
14498@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14499 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14500 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14501
14502+ pax_force_retaddr
14503 ret;
14504 ENDPROC(__twofish_enc_blk8)
14505
14506@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14507 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14508 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14509
14510+ pax_force_retaddr
14511 ret;
14512 ENDPROC(__twofish_dec_blk8)
14513
14514@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14515
14516 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14517
14518+ pax_force_retaddr
14519 ret;
14520 ENDPROC(twofish_ecb_enc_8way)
14521
14522@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14523
14524 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14525
14526+ pax_force_retaddr
14527 ret;
14528 ENDPROC(twofish_ecb_dec_8way)
14529
14530@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14531 * %rdx: src
14532 */
14533
14534- pushq %r12;
14535+ pushq %r14;
14536
14537 movq %rsi, %r11;
14538- movq %rdx, %r12;
14539+ movq %rdx, %r14;
14540
14541 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14542
14543 call __twofish_dec_blk8;
14544
14545- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14546+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14547
14548- popq %r12;
14549+ popq %r14;
14550
14551+ pax_force_retaddr
14552 ret;
14553 ENDPROC(twofish_cbc_dec_8way)
14554
14555@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14556 * %rcx: iv (little endian, 128bit)
14557 */
14558
14559- pushq %r12;
14560+ pushq %r14;
14561
14562 movq %rsi, %r11;
14563- movq %rdx, %r12;
14564+ movq %rdx, %r14;
14565
14566 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14567 RD2, RX0, RX1, RY0);
14568
14569 call __twofish_enc_blk8;
14570
14571- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14572+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14573
14574- popq %r12;
14575+ popq %r14;
14576
14577+ pax_force_retaddr
14578 ret;
14579 ENDPROC(twofish_ctr_8way)
14580
14581@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14582 /* dst <= regs xor IVs(in dst) */
14583 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14584
14585+ pax_force_retaddr
14586 ret;
14587 ENDPROC(twofish_xts_enc_8way)
14588
14589@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14590 /* dst <= regs xor IVs(in dst) */
14591 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14592
14593+ pax_force_retaddr
14594 ret;
14595 ENDPROC(twofish_xts_dec_8way)
14596diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14597index 1c3b7ce..02f578d 100644
14598--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14599+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14600@@ -21,6 +21,7 @@
14601 */
14602
14603 #include <linux/linkage.h>
14604+#include <asm/alternative-asm.h>
14605
14606 .file "twofish-x86_64-asm-3way.S"
14607 .text
14608@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14609 popq %r13;
14610 popq %r14;
14611 popq %r15;
14612+ pax_force_retaddr
14613 ret;
14614
14615 .L__enc_xor3:
14616@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14617 popq %r13;
14618 popq %r14;
14619 popq %r15;
14620+ pax_force_retaddr
14621 ret;
14622 ENDPROC(__twofish_enc_blk_3way)
14623
14624@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14625 popq %r13;
14626 popq %r14;
14627 popq %r15;
14628+ pax_force_retaddr
14629 ret;
14630 ENDPROC(twofish_dec_blk_3way)
14631diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14632index a039d21..524b8b2 100644
14633--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14634+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14635@@ -22,6 +22,7 @@
14636
14637 #include <linux/linkage.h>
14638 #include <asm/asm-offsets.h>
14639+#include <asm/alternative-asm.h>
14640
14641 #define a_offset 0
14642 #define b_offset 4
14643@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14644
14645 popq R1
14646 movq $1,%rax
14647+ pax_force_retaddr
14648 ret
14649 ENDPROC(twofish_enc_blk)
14650
14651@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14652
14653 popq R1
14654 movq $1,%rax
14655+ pax_force_retaddr
14656 ret
14657 ENDPROC(twofish_dec_blk)
14658diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14659index ae6aad1..719d6d9 100644
14660--- a/arch/x86/ia32/ia32_aout.c
14661+++ b/arch/x86/ia32/ia32_aout.c
14662@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14663 unsigned long dump_start, dump_size;
14664 struct user32 dump;
14665
14666+ memset(&dump, 0, sizeof(dump));
14667+
14668 fs = get_fs();
14669 set_fs(KERNEL_DS);
14670 has_dumped = 1;
14671diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14672index f9e181a..300544c 100644
14673--- a/arch/x86/ia32/ia32_signal.c
14674+++ b/arch/x86/ia32/ia32_signal.c
14675@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14676 if (__get_user(set.sig[0], &frame->sc.oldmask)
14677 || (_COMPAT_NSIG_WORDS > 1
14678 && __copy_from_user((((char *) &set.sig) + 4),
14679- &frame->extramask,
14680+ frame->extramask,
14681 sizeof(frame->extramask))))
14682 goto badframe;
14683
14684@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14685 sp -= frame_size;
14686 /* Align the stack pointer according to the i386 ABI,
14687 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14688- sp = ((sp + 4) & -16ul) - 4;
14689+ sp = ((sp - 12) & -16ul) - 4;
14690 return (void __user *) sp;
14691 }
14692
14693@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14694 } else {
14695 /* Return stub is in 32bit vsyscall page */
14696 if (current->mm->context.vdso)
14697- restorer = current->mm->context.vdso +
14698- selected_vdso32->sym___kernel_sigreturn;
14699+ restorer = (void __force_user *)(current->mm->context.vdso +
14700+ selected_vdso32->sym___kernel_sigreturn);
14701 else
14702- restorer = &frame->retcode;
14703+ restorer = frame->retcode;
14704 }
14705
14706 put_user_try {
14707@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14708 * These are actually not used anymore, but left because some
14709 * gdb versions depend on them as a marker.
14710 */
14711- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14712+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14713 } put_user_catch(err);
14714
14715 if (err)
14716@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14717 0xb8,
14718 __NR_ia32_rt_sigreturn,
14719 0x80cd,
14720- 0,
14721+ 0
14722 };
14723
14724 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14725@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14726
14727 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14728 restorer = ksig->ka.sa.sa_restorer;
14729+ else if (current->mm->context.vdso)
14730+ /* Return stub is in 32bit vsyscall page */
14731+ restorer = (void __force_user *)(current->mm->context.vdso +
14732+ selected_vdso32->sym___kernel_rt_sigreturn);
14733 else
14734- restorer = current->mm->context.vdso +
14735- selected_vdso32->sym___kernel_rt_sigreturn;
14736+ restorer = frame->retcode;
14737 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14738
14739 /*
14740 * Not actually used anymore, but left because some gdb
14741 * versions need it.
14742 */
14743- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14744+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14745 } put_user_catch(err);
14746
14747 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14748diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14749index 82e8a1d..4e998d5 100644
14750--- a/arch/x86/ia32/ia32entry.S
14751+++ b/arch/x86/ia32/ia32entry.S
14752@@ -15,8 +15,10 @@
14753 #include <asm/irqflags.h>
14754 #include <asm/asm.h>
14755 #include <asm/smap.h>
14756+#include <asm/pgtable.h>
14757 #include <linux/linkage.h>
14758 #include <linux/err.h>
14759+#include <asm/alternative-asm.h>
14760
14761 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14762 #include <linux/elf-em.h>
14763@@ -62,12 +64,12 @@
14764 */
14765 .macro LOAD_ARGS32 offset, _r9=0
14766 .if \_r9
14767- movl \offset+16(%rsp),%r9d
14768+ movl \offset+R9(%rsp),%r9d
14769 .endif
14770- movl \offset+40(%rsp),%ecx
14771- movl \offset+48(%rsp),%edx
14772- movl \offset+56(%rsp),%esi
14773- movl \offset+64(%rsp),%edi
14774+ movl \offset+RCX(%rsp),%ecx
14775+ movl \offset+RDX(%rsp),%edx
14776+ movl \offset+RSI(%rsp),%esi
14777+ movl \offset+RDI(%rsp),%edi
14778 movl %eax,%eax /* zero extension */
14779 .endm
14780
14781@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14782 ENDPROC(native_irq_enable_sysexit)
14783 #endif
14784
14785+ .macro pax_enter_kernel_user
14786+ pax_set_fptr_mask
14787+#ifdef CONFIG_PAX_MEMORY_UDEREF
14788+ call pax_enter_kernel_user
14789+#endif
14790+ .endm
14791+
14792+ .macro pax_exit_kernel_user
14793+#ifdef CONFIG_PAX_MEMORY_UDEREF
14794+ call pax_exit_kernel_user
14795+#endif
14796+#ifdef CONFIG_PAX_RANDKSTACK
14797+ pushq %rax
14798+ pushq %r11
14799+ call pax_randomize_kstack
14800+ popq %r11
14801+ popq %rax
14802+#endif
14803+ .endm
14804+
14805+ .macro pax_erase_kstack
14806+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14807+ call pax_erase_kstack
14808+#endif
14809+ .endm
14810+
14811 /*
14812 * 32bit SYSENTER instruction entry.
14813 *
14814@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14815 CFI_REGISTER rsp,rbp
14816 SWAPGS_UNSAFE_STACK
14817 movq PER_CPU_VAR(kernel_stack), %rsp
14818- addq $(KERNEL_STACK_OFFSET),%rsp
14819- /*
14820- * No need to follow this irqs on/off section: the syscall
14821- * disabled irqs, here we enable it straight after entry:
14822- */
14823- ENABLE_INTERRUPTS(CLBR_NONE)
14824 movl %ebp,%ebp /* zero extension */
14825 pushq_cfi $__USER32_DS
14826 /*CFI_REL_OFFSET ss,0*/
14827@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14828 CFI_REL_OFFSET rsp,0
14829 pushfq_cfi
14830 /*CFI_REL_OFFSET rflags,0*/
14831- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14832- CFI_REGISTER rip,r10
14833+ orl $X86_EFLAGS_IF,(%rsp)
14834+ GET_THREAD_INFO(%r11)
14835+ movl TI_sysenter_return(%r11), %r11d
14836+ CFI_REGISTER rip,r11
14837 pushq_cfi $__USER32_CS
14838 /*CFI_REL_OFFSET cs,0*/
14839 movl %eax, %eax
14840- pushq_cfi %r10
14841+ pushq_cfi %r11
14842 CFI_REL_OFFSET rip,0
14843 pushq_cfi %rax
14844 cld
14845 SAVE_ARGS 0,1,0
14846+ pax_enter_kernel_user
14847+
14848+#ifdef CONFIG_PAX_RANDKSTACK
14849+ pax_erase_kstack
14850+#endif
14851+
14852+ /*
14853+ * No need to follow this irqs on/off section: the syscall
14854+ * disabled irqs, here we enable it straight after entry:
14855+ */
14856+ ENABLE_INTERRUPTS(CLBR_NONE)
14857 /* no need to do an access_ok check here because rbp has been
14858 32bit zero extended */
14859+
14860+#ifdef CONFIG_PAX_MEMORY_UDEREF
14861+ addq pax_user_shadow_base,%rbp
14862+ ASM_PAX_OPEN_USERLAND
14863+#endif
14864+
14865 ASM_STAC
14866 1: movl (%rbp),%ebp
14867 _ASM_EXTABLE(1b,ia32_badarg)
14868 ASM_CLAC
14869
14870+#ifdef CONFIG_PAX_MEMORY_UDEREF
14871+ ASM_PAX_CLOSE_USERLAND
14872+#endif
14873+
14874 /*
14875 * Sysenter doesn't filter flags, so we need to clear NT
14876 * ourselves. To save a few cycles, we can check whether
14877@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14878 jnz sysenter_fix_flags
14879 sysenter_flags_fixed:
14880
14881- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14882- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14883+ GET_THREAD_INFO(%r11)
14884+ orl $TS_COMPAT,TI_status(%r11)
14885+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14886 CFI_REMEMBER_STATE
14887 jnz sysenter_tracesys
14888 cmpq $(IA32_NR_syscalls-1),%rax
14889@@ -172,15 +218,18 @@ sysenter_do_call:
14890 sysenter_dispatch:
14891 call *ia32_sys_call_table(,%rax,8)
14892 movq %rax,RAX-ARGOFFSET(%rsp)
14893+ GET_THREAD_INFO(%r11)
14894 DISABLE_INTERRUPTS(CLBR_NONE)
14895 TRACE_IRQS_OFF
14896- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14897+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14898 jnz sysexit_audit
14899 sysexit_from_sys_call:
14900- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14901+ pax_exit_kernel_user
14902+ pax_erase_kstack
14903+ andl $~TS_COMPAT,TI_status(%r11)
14904 /* clear IF, that popfq doesn't enable interrupts early */
14905- andl $~0x200,EFLAGS-R11(%rsp)
14906- movl RIP-R11(%rsp),%edx /* User %eip */
14907+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14908+ movl RIP(%rsp),%edx /* User %eip */
14909 CFI_REGISTER rip,rdx
14910 RESTORE_ARGS 0,24,0,0,0,0
14911 xorq %r8,%r8
14912@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14913 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14914 movl %eax,%edi /* 1st arg: syscall number */
14915 call __audit_syscall_entry
14916+
14917+ pax_erase_kstack
14918+
14919 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14920 cmpq $(IA32_NR_syscalls-1),%rax
14921 ja ia32_badsys
14922@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14923 .endm
14924
14925 .macro auditsys_exit exit
14926- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14927+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14928 jnz ia32_ret_from_sys_call
14929 TRACE_IRQS_ON
14930 ENABLE_INTERRUPTS(CLBR_NONE)
14931@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14932 1: setbe %al /* 1 if error, 0 if not */
14933 movzbl %al,%edi /* zero-extend that into %edi */
14934 call __audit_syscall_exit
14935+ GET_THREAD_INFO(%r11)
14936 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14937 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14938 DISABLE_INTERRUPTS(CLBR_NONE)
14939 TRACE_IRQS_OFF
14940- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14941+ testl %edi,TI_flags(%r11)
14942 jz \exit
14943 CLEAR_RREGS -ARGOFFSET
14944 jmp int_with_check
14945@@ -253,7 +306,7 @@ sysenter_fix_flags:
14946
14947 sysenter_tracesys:
14948 #ifdef CONFIG_AUDITSYSCALL
14949- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14950+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14951 jz sysenter_auditsys
14952 #endif
14953 SAVE_REST
14954@@ -265,6 +318,9 @@ sysenter_tracesys:
14955 RESTORE_REST
14956 cmpq $(IA32_NR_syscalls-1),%rax
14957 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14958+
14959+ pax_erase_kstack
14960+
14961 jmp sysenter_do_call
14962 CFI_ENDPROC
14963 ENDPROC(ia32_sysenter_target)
14964@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14965 ENTRY(ia32_cstar_target)
14966 CFI_STARTPROC32 simple
14967 CFI_SIGNAL_FRAME
14968- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14969+ CFI_DEF_CFA rsp,0
14970 CFI_REGISTER rip,rcx
14971 /*CFI_REGISTER rflags,r11*/
14972 SWAPGS_UNSAFE_STACK
14973 movl %esp,%r8d
14974 CFI_REGISTER rsp,r8
14975 movq PER_CPU_VAR(kernel_stack),%rsp
14976+ SAVE_ARGS 8*6,0,0
14977+ pax_enter_kernel_user
14978+
14979+#ifdef CONFIG_PAX_RANDKSTACK
14980+ pax_erase_kstack
14981+#endif
14982+
14983 /*
14984 * No need to follow this irqs on/off section: the syscall
14985 * disabled irqs and here we enable it straight after entry:
14986 */
14987 ENABLE_INTERRUPTS(CLBR_NONE)
14988- SAVE_ARGS 8,0,0
14989 movl %eax,%eax /* zero extension */
14990 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14991 movq %rcx,RIP-ARGOFFSET(%rsp)
14992@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14993 /* no need to do an access_ok check here because r8 has been
14994 	   32-bit zero extended */
14995 /* hardware stack frame is complete now */
14996+
14997+#ifdef CONFIG_PAX_MEMORY_UDEREF
14998+ ASM_PAX_OPEN_USERLAND
14999+ movq pax_user_shadow_base,%r8
15000+ addq RSP-ARGOFFSET(%rsp),%r8
15001+#endif
15002+
15003 ASM_STAC
15004 1: movl (%r8),%r9d
15005 _ASM_EXTABLE(1b,ia32_badarg)
15006 ASM_CLAC
15007- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15008- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15009+
15010+#ifdef CONFIG_PAX_MEMORY_UDEREF
15011+ ASM_PAX_CLOSE_USERLAND
15012+#endif
15013+
15014+ GET_THREAD_INFO(%r11)
15015+ orl $TS_COMPAT,TI_status(%r11)
15016+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15017 CFI_REMEMBER_STATE
15018 jnz cstar_tracesys
15019 cmpq $IA32_NR_syscalls-1,%rax
15020@@ -335,13 +410,16 @@ cstar_do_call:
15021 cstar_dispatch:
15022 call *ia32_sys_call_table(,%rax,8)
15023 movq %rax,RAX-ARGOFFSET(%rsp)
15024+ GET_THREAD_INFO(%r11)
15025 DISABLE_INTERRUPTS(CLBR_NONE)
15026 TRACE_IRQS_OFF
15027- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15028+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15029 jnz sysretl_audit
15030 sysretl_from_sys_call:
15031- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15032- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15033+ pax_exit_kernel_user
15034+ pax_erase_kstack
15035+ andl $~TS_COMPAT,TI_status(%r11)
15036+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15037 movl RIP-ARGOFFSET(%rsp),%ecx
15038 CFI_REGISTER rip,rcx
15039 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15040@@ -368,7 +446,7 @@ sysretl_audit:
15041
15042 cstar_tracesys:
15043 #ifdef CONFIG_AUDITSYSCALL
15044- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15045+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15046 jz cstar_auditsys
15047 #endif
15048 xchgl %r9d,%ebp
15049@@ -382,11 +460,19 @@ cstar_tracesys:
15050 xchgl %ebp,%r9d
15051 cmpq $(IA32_NR_syscalls-1),%rax
15052 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15053+
15054+ pax_erase_kstack
15055+
15056 jmp cstar_do_call
15057 END(ia32_cstar_target)
15058
15059 ia32_badarg:
15060 ASM_CLAC
15061+
15062+#ifdef CONFIG_PAX_MEMORY_UDEREF
15063+ ASM_PAX_CLOSE_USERLAND
15064+#endif
15065+
15066 movq $-EFAULT,%rax
15067 jmp ia32_sysret
15068 CFI_ENDPROC
15069@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15070 CFI_REL_OFFSET rip,RIP-RIP
15071 PARAVIRT_ADJUST_EXCEPTION_FRAME
15072 SWAPGS
15073- /*
15074- * No need to follow this irqs on/off section: the syscall
15075- * disabled irqs and here we enable it straight after entry:
15076- */
15077- ENABLE_INTERRUPTS(CLBR_NONE)
15078 movl %eax,%eax
15079 pushq_cfi %rax
15080 cld
15081 	/* note the registers are not zero extended to the stack frame.
15082 	   this could be a problem. */
15083 SAVE_ARGS 0,1,0
15084- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15085- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15086+ pax_enter_kernel_user
15087+
15088+#ifdef CONFIG_PAX_RANDKSTACK
15089+ pax_erase_kstack
15090+#endif
15091+
15092+ /*
15093+ * No need to follow this irqs on/off section: the syscall
15094+ * disabled irqs and here we enable it straight after entry:
15095+ */
15096+ ENABLE_INTERRUPTS(CLBR_NONE)
15097+ GET_THREAD_INFO(%r11)
15098+ orl $TS_COMPAT,TI_status(%r11)
15099+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15100 jnz ia32_tracesys
15101 cmpq $(IA32_NR_syscalls-1),%rax
15102 ja ia32_badsys
15103@@ -458,6 +551,9 @@ ia32_tracesys:
15104 RESTORE_REST
15105 cmpq $(IA32_NR_syscalls-1),%rax
15106 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15107+
15108+ pax_erase_kstack
15109+
15110 jmp ia32_do_call
15111 END(ia32_syscall)
15112
15113diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15114index 8e0ceec..af13504 100644
15115--- a/arch/x86/ia32/sys_ia32.c
15116+++ b/arch/x86/ia32/sys_ia32.c
15117@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15118 */
15119 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15120 {
15121- typeof(ubuf->st_uid) uid = 0;
15122- typeof(ubuf->st_gid) gid = 0;
15123+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15124+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15125 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15126 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15127 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
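
The cp_stat64() hunk above swaps typeof(ubuf->st_uid) for the null-pointer-cast
form so the type is derived without naming a dereference of a __user pointer.
This is safe because typeof(), like sizeof(), never evaluates its operand; it
is the same idiom offsetof() is built on. A minimal userspace sketch, using a
stand-in struct since struct stat64 itself is kernel ABI:

#include <stdio.h>

struct stat64_like {			/* stand-in for struct stat64 */
	unsigned int st_uid;
	unsigned int st_gid;
};

int main(void)
{
	/* typeof() only inspects the type of its operand and does not
	 * evaluate it, so casting a null pointer and "dereferencing" it
	 * here performs no load at all. */
	typeof(((struct stat64_like *)0)->st_uid) uid = 0;

	printf("sizeof(uid) = %zu\n", sizeof uid);
	return 0;
}
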
15128diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15129index 372231c..51b537d 100644
15130--- a/arch/x86/include/asm/alternative-asm.h
15131+++ b/arch/x86/include/asm/alternative-asm.h
15132@@ -18,6 +18,45 @@
15133 .endm
15134 #endif
15135
15136+#ifdef KERNEXEC_PLUGIN
15137+ .macro pax_force_retaddr_bts rip=0
15138+ btsq $63,\rip(%rsp)
15139+ .endm
15140+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15141+ .macro pax_force_retaddr rip=0, reload=0
15142+ btsq $63,\rip(%rsp)
15143+ .endm
15144+ .macro pax_force_fptr ptr
15145+ btsq $63,\ptr
15146+ .endm
15147+ .macro pax_set_fptr_mask
15148+ .endm
15149+#endif
15150+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15151+ .macro pax_force_retaddr rip=0, reload=0
15152+ .if \reload
15153+ pax_set_fptr_mask
15154+ .endif
15155+ orq %r12,\rip(%rsp)
15156+ .endm
15157+ .macro pax_force_fptr ptr
15158+ orq %r12,\ptr
15159+ .endm
15160+ .macro pax_set_fptr_mask
15161+ movabs $0x8000000000000000,%r12
15162+ .endm
15163+#endif
15164+#else
15165+ .macro pax_force_retaddr rip=0, reload=0
15166+ .endm
15167+ .macro pax_force_fptr ptr
15168+ .endm
15169+ .macro pax_force_retaddr_bts rip=0
15170+ .endm
15171+ .macro pax_set_fptr_mask
15172+ .endm
15173+#endif
15174+
15175 .macro altinstruction_entry orig alt feature orig_len alt_len
15176 .long \orig - .
15177 .long \alt - .
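
The pax_force_retaddr/pax_force_fptr macros above implement the KERNEXEC
plugin's return-address and function-pointer hardening: both the BTS and the
OR method set bit 63 of the saved value. For a genuine kernel address that bit
is already set, so the operation is a no-op, but a userland address smuggled
into a saved return slot becomes non-canonical and the eventual ret faults
instead of transferring control to user memory. A sketch of the arithmetic
(the sample addresses are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t mask = 0x8000000000000000ULL; /* bit 63; the OR method keeps this in %r12 */
	uint64_t kernel_ret = 0xffffffff81000000ULL; /* typical kernel text address */
	uint64_t forged_ret = 0x0000000000400000ULL; /* typical userland address */

	/* No-op for the genuine kernel address... */
	printf("%#llx -> %#llx\n", (unsigned long long)kernel_ret,
	       (unsigned long long)(kernel_ret | mask));
	/* ...but the forged address becomes non-canonical on x86-64, so a
	 * ret through it raises #GP rather than executing user pages. */
	printf("%#llx -> %#llx\n", (unsigned long long)forged_ret,
	       (unsigned long long)(forged_ret | mask));
	return 0;
}
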
15178diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15179index 473bdbe..b1e3377 100644
15180--- a/arch/x86/include/asm/alternative.h
15181+++ b/arch/x86/include/asm/alternative.h
15182@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15183 ".pushsection .discard,\"aw\",@progbits\n" \
15184 DISCARD_ENTRY(1) \
15185 ".popsection\n" \
15186- ".pushsection .altinstr_replacement, \"ax\"\n" \
15187+ ".pushsection .altinstr_replacement, \"a\"\n" \
15188 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15189 ".popsection"
15190
15191@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15192 DISCARD_ENTRY(1) \
15193 DISCARD_ENTRY(2) \
15194 ".popsection\n" \
15195- ".pushsection .altinstr_replacement, \"ax\"\n" \
15196+ ".pushsection .altinstr_replacement, \"a\"\n" \
15197 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15198 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15199 ".popsection"
15200diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15201index 465b309..ab7e51f 100644
15202--- a/arch/x86/include/asm/apic.h
15203+++ b/arch/x86/include/asm/apic.h
15204@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15205
15206 #ifdef CONFIG_X86_LOCAL_APIC
15207
15208-extern unsigned int apic_verbosity;
15209+extern int apic_verbosity;
15210 extern int local_apic_timer_c2_ok;
15211
15212 extern int disable_apic;
15213diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15214index 20370c6..a2eb9b0 100644
15215--- a/arch/x86/include/asm/apm.h
15216+++ b/arch/x86/include/asm/apm.h
15217@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15218 __asm__ __volatile__(APM_DO_ZERO_SEGS
15219 "pushl %%edi\n\t"
15220 "pushl %%ebp\n\t"
15221- "lcall *%%cs:apm_bios_entry\n\t"
15222+ "lcall *%%ss:apm_bios_entry\n\t"
15223 "setc %%al\n\t"
15224 "popl %%ebp\n\t"
15225 "popl %%edi\n\t"
15226@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15227 __asm__ __volatile__(APM_DO_ZERO_SEGS
15228 "pushl %%edi\n\t"
15229 "pushl %%ebp\n\t"
15230- "lcall *%%cs:apm_bios_entry\n\t"
15231+ "lcall *%%ss:apm_bios_entry\n\t"
15232 "setc %%bl\n\t"
15233 "popl %%ebp\n\t"
15234 "popl %%edi\n\t"
15235diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15236index 5e5cd12..51cdc93 100644
15237--- a/arch/x86/include/asm/atomic.h
15238+++ b/arch/x86/include/asm/atomic.h
15239@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15240 }
15241
15242 /**
15243+ * atomic_read_unchecked - read atomic variable
15244+ * @v: pointer of type atomic_unchecked_t
15245+ *
15246+ * Atomically reads the value of @v.
15247+ */
15248+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15249+{
15250+ return ACCESS_ONCE((v)->counter);
15251+}
15252+
15253+/**
15254 * atomic_set - set atomic variable
15255 * @v: pointer of type atomic_t
15256 * @i: required value
15257@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15258 }
15259
15260 /**
15261+ * atomic_set_unchecked - set atomic variable
15262+ * @v: pointer of type atomic_unchecked_t
15263+ * @i: required value
15264+ *
15265+ * Atomically sets the value of @v to @i.
15266+ */
15267+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15268+{
15269+ v->counter = i;
15270+}
15271+
15272+/**
15273 * atomic_add - add integer to atomic variable
15274 * @i: integer value to add
15275 * @v: pointer of type atomic_t
15276@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15277 */
15278 static inline void atomic_add(int i, atomic_t *v)
15279 {
15280- asm volatile(LOCK_PREFIX "addl %1,%0"
15281+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15282+
15283+#ifdef CONFIG_PAX_REFCOUNT
15284+ "jno 0f\n"
15285+ LOCK_PREFIX "subl %1,%0\n"
15286+ "int $4\n0:\n"
15287+ _ASM_EXTABLE(0b, 0b)
15288+#endif
15289+
15290+ : "+m" (v->counter)
15291+ : "ir" (i));
15292+}
15293+
15294+/**
15295+ * atomic_add_unchecked - add integer to atomic variable
15296+ * @i: integer value to add
15297+ * @v: pointer of type atomic_unchecked_t
15298+ *
15299+ * Atomically adds @i to @v.
15300+ */
15301+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15302+{
15303+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15304 : "+m" (v->counter)
15305 : "ir" (i));
15306 }
15307@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15308 */
15309 static inline void atomic_sub(int i, atomic_t *v)
15310 {
15311- asm volatile(LOCK_PREFIX "subl %1,%0"
15312+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15313+
15314+#ifdef CONFIG_PAX_REFCOUNT
15315+ "jno 0f\n"
15316+ LOCK_PREFIX "addl %1,%0\n"
15317+ "int $4\n0:\n"
15318+ _ASM_EXTABLE(0b, 0b)
15319+#endif
15320+
15321+ : "+m" (v->counter)
15322+ : "ir" (i));
15323+}
15324+
15325+/**
15326+ * atomic_sub_unchecked - subtract integer from atomic variable
15327+ * @i: integer value to subtract
15328+ * @v: pointer of type atomic_unchecked_t
15329+ *
15330+ * Atomically subtracts @i from @v.
15331+ */
15332+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15333+{
15334+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15335 : "+m" (v->counter)
15336 : "ir" (i));
15337 }
15338@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15339 */
15340 static inline int atomic_sub_and_test(int i, atomic_t *v)
15341 {
15342- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15343+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15344 }
15345
15346 /**
15347@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15348 */
15349 static inline void atomic_inc(atomic_t *v)
15350 {
15351- asm volatile(LOCK_PREFIX "incl %0"
15352+ asm volatile(LOCK_PREFIX "incl %0\n"
15353+
15354+#ifdef CONFIG_PAX_REFCOUNT
15355+ "jno 0f\n"
15356+ LOCK_PREFIX "decl %0\n"
15357+ "int $4\n0:\n"
15358+ _ASM_EXTABLE(0b, 0b)
15359+#endif
15360+
15361+ : "+m" (v->counter));
15362+}
15363+
15364+/**
15365+ * atomic_inc_unchecked - increment atomic variable
15366+ * @v: pointer of type atomic_unchecked_t
15367+ *
15368+ * Atomically increments @v by 1.
15369+ */
15370+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15371+{
15372+ asm volatile(LOCK_PREFIX "incl %0\n"
15373 : "+m" (v->counter));
15374 }
15375
15376@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15377 */
15378 static inline void atomic_dec(atomic_t *v)
15379 {
15380- asm volatile(LOCK_PREFIX "decl %0"
15381+ asm volatile(LOCK_PREFIX "decl %0\n"
15382+
15383+#ifdef CONFIG_PAX_REFCOUNT
15384+ "jno 0f\n"
15385+ LOCK_PREFIX "incl %0\n"
15386+ "int $4\n0:\n"
15387+ _ASM_EXTABLE(0b, 0b)
15388+#endif
15389+
15390+ : "+m" (v->counter));
15391+}
15392+
15393+/**
15394+ * atomic_dec_unchecked - decrement atomic variable
15395+ * @v: pointer of type atomic_unchecked_t
15396+ *
15397+ * Atomically decrements @v by 1.
15398+ */
15399+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15400+{
15401+ asm volatile(LOCK_PREFIX "decl %0\n"
15402 : "+m" (v->counter));
15403 }
15404
15405@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15406 */
15407 static inline int atomic_dec_and_test(atomic_t *v)
15408 {
15409- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15410+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15411 }
15412
15413 /**
15414@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15415 */
15416 static inline int atomic_inc_and_test(atomic_t *v)
15417 {
15418- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15419+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15420+}
15421+
15422+/**
15423+ * atomic_inc_and_test_unchecked - increment and test
15424+ * @v: pointer of type atomic_unchecked_t
15425+ *
15426+ * Atomically increments @v by 1
15427+ * and returns true if the result is zero, or false for all
15428+ * other cases.
15429+ */
15430+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15431+{
15432+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15433 }
15434
15435 /**
15436@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15437 */
15438 static inline int atomic_add_negative(int i, atomic_t *v)
15439 {
15440- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15441+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15442 }
15443
15444 /**
15445@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15446 *
15447 * Atomically adds @i to @v and returns @i + @v
15448 */
15449-static inline int atomic_add_return(int i, atomic_t *v)
15450+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15451+{
15452+ return i + xadd_check_overflow(&v->counter, i);
15453+}
15454+
15455+/**
15456+ * atomic_add_return_unchecked - add integer and return
15457+ * @i: integer value to add
15458+ * @v: pointer of type atomic_unchecked_t
15459+ *
15460+ * Atomically adds @i to @v and returns @i + @v
15461+ */
15462+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15463 {
15464 return i + xadd(&v->counter, i);
15465 }
15466@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15467 *
15468 * Atomically subtracts @i from @v and returns @v - @i
15469 */
15470-static inline int atomic_sub_return(int i, atomic_t *v)
15471+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15472 {
15473 return atomic_add_return(-i, v);
15474 }
15475
15476 #define atomic_inc_return(v) (atomic_add_return(1, v))
15477+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15478+{
15479+ return atomic_add_return_unchecked(1, v);
15480+}
15481 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15482
15483-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15484+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15485+{
15486+ return cmpxchg(&v->counter, old, new);
15487+}
15488+
15489+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15490 {
15491 return cmpxchg(&v->counter, old, new);
15492 }
15493@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15494 return xchg(&v->counter, new);
15495 }
15496
15497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15498+{
15499+ return xchg(&v->counter, new);
15500+}
15501+
15502 /**
15503 * __atomic_add_unless - add unless the number is already a given value
15504 * @v: pointer of type atomic_t
15505@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15506 */
15507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15508 {
15509- int c, old;
15510+ int c, old, new;
15511 c = atomic_read(v);
15512 for (;;) {
15513- if (unlikely(c == (u)))
15514+ if (unlikely(c == u))
15515 break;
15516- old = atomic_cmpxchg((v), c, c + (a));
15517+
15518+ asm volatile("addl %2,%0\n"
15519+
15520+#ifdef CONFIG_PAX_REFCOUNT
15521+ "jno 0f\n"
15522+ "subl %2,%0\n"
15523+ "int $4\n0:\n"
15524+ _ASM_EXTABLE(0b, 0b)
15525+#endif
15526+
15527+ : "=r" (new)
15528+ : "0" (c), "ir" (a));
15529+
15530+ old = atomic_cmpxchg(v, c, new);
15531 if (likely(old == c))
15532 break;
15533 c = old;
15534@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15535 }
15536
15537 /**
15538+ * atomic_inc_not_zero_hint - increment if not zero
15539+ * @v: pointer of type atomic_t
15540+ * @hint: probable value of the atomic before the increment
15541+ *
15542+ * This version of atomic_inc_not_zero() gives a hint of the probable
15543+ * value of the atomic. This helps the processor avoid reading the memory
15544+ * before doing the atomic read/modify/write cycle, lowering the
15545+ * number of bus transactions on some arches.
15546+ *
15547+ * Returns: 0 if increment was not done, 1 otherwise.
15548+ */
15549+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15550+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15551+{
15552+ int val, c = hint, new;
15553+
15554+ /* sanity test, should be removed by compiler if hint is a constant */
15555+ if (!hint)
15556+ return __atomic_add_unless(v, 1, 0);
15557+
15558+ do {
15559+ asm volatile("incl %0\n"
15560+
15561+#ifdef CONFIG_PAX_REFCOUNT
15562+ "jno 0f\n"
15563+ "decl %0\n"
15564+ "int $4\n0:\n"
15565+ _ASM_EXTABLE(0b, 0b)
15566+#endif
15567+
15568+ : "=r" (new)
15569+ : "0" (c));
15570+
15571+ val = atomic_cmpxchg(v, c, new);
15572+ if (val == c)
15573+ return 1;
15574+ c = val;
15575+ } while (c);
15576+
15577+ return 0;
15578+}
15579+
15580+/**
15581 * atomic_inc_short - increment of a short integer
15582  * @v: pointer to type short int
15583 *
15584@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15585 }
15586
15587 /* These are x86-specific, used by some header files */
15588-#define atomic_clear_mask(mask, addr) \
15589- asm volatile(LOCK_PREFIX "andl %0,%1" \
15590- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15591+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15592+{
15593+ asm volatile(LOCK_PREFIX "andl %1,%0"
15594+ : "+m" (v->counter)
15595+ : "r" (~(mask))
15596+ : "memory");
15597+}
15598
15599-#define atomic_set_mask(mask, addr) \
15600- asm volatile(LOCK_PREFIX "orl %0,%1" \
15601- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15602- : "memory")
15603+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15604+{
15605+ asm volatile(LOCK_PREFIX "andl %1,%0"
15606+ : "+m" (v->counter)
15607+ : "r" (~(mask))
15608+ : "memory");
15609+}
15610+
15611+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15612+{
15613+ asm volatile(LOCK_PREFIX "orl %1,%0"
15614+ : "+m" (v->counter)
15615+ : "r" (mask)
15616+ : "memory");
15617+}
15618+
15619+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15620+{
15621+ asm volatile(LOCK_PREFIX "orl %1,%0"
15622+ : "+m" (v->counter)
15623+ : "r" (mask)
15624+ : "memory");
15625+}
15626
15627 #ifdef CONFIG_X86_32
15628 # include <asm/atomic64_32.h>
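
Throughout the atomic.h changes the PAX_REFCOUNT pattern is the same: perform
the locked operation, then "jno 0f" skips the recovery path when the overflow
flag is clear; on signed overflow the operation is undone and "int $4" raises
the overflow exception, which the kernel's handler turns into a refcount
overflow report. The counter therefore saturates instead of wrapping around
(the classic route to a use-after-free). Equivalent logic in plain C, as a
sketch only -- the real code must do add-then-undo because a locked x86 add
cannot be made conditional:

#include <limits.h>
#include <stdio.h>

/* Model of a PAX_REFCOUNT-checked add: refuse to let the counter wrap. */
static int checked_add(int *counter, int i)
{
	int new;

	if (__builtin_add_overflow(*counter, i, &new))
		return -1;	/* the patch traps here via "int $4" */
	*counter = new;
	return 0;
}

int main(void)
{
	int refcount = INT_MAX;

	if (checked_add(&refcount, 1))
		printf("overflow caught, counter held at %d\n", refcount);
	return 0;
}
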
15629diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15630index b154de7..bf18a5a 100644
15631--- a/arch/x86/include/asm/atomic64_32.h
15632+++ b/arch/x86/include/asm/atomic64_32.h
15633@@ -12,6 +12,14 @@ typedef struct {
15634 u64 __aligned(8) counter;
15635 } atomic64_t;
15636
15637+#ifdef CONFIG_PAX_REFCOUNT
15638+typedef struct {
15639+ u64 __aligned(8) counter;
15640+} atomic64_unchecked_t;
15641+#else
15642+typedef atomic64_t atomic64_unchecked_t;
15643+#endif
15644+
15645 #define ATOMIC64_INIT(val) { (val) }
15646
15647 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15648@@ -37,21 +45,31 @@ typedef struct {
15649 ATOMIC64_DECL_ONE(sym##_386)
15650
15651 ATOMIC64_DECL_ONE(add_386);
15652+ATOMIC64_DECL_ONE(add_unchecked_386);
15653 ATOMIC64_DECL_ONE(sub_386);
15654+ATOMIC64_DECL_ONE(sub_unchecked_386);
15655 ATOMIC64_DECL_ONE(inc_386);
15656+ATOMIC64_DECL_ONE(inc_unchecked_386);
15657 ATOMIC64_DECL_ONE(dec_386);
15658+ATOMIC64_DECL_ONE(dec_unchecked_386);
15659 #endif
15660
15661 #define alternative_atomic64(f, out, in...) \
15662 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15663
15664 ATOMIC64_DECL(read);
15665+ATOMIC64_DECL(read_unchecked);
15666 ATOMIC64_DECL(set);
15667+ATOMIC64_DECL(set_unchecked);
15668 ATOMIC64_DECL(xchg);
15669 ATOMIC64_DECL(add_return);
15670+ATOMIC64_DECL(add_return_unchecked);
15671 ATOMIC64_DECL(sub_return);
15672+ATOMIC64_DECL(sub_return_unchecked);
15673 ATOMIC64_DECL(inc_return);
15674+ATOMIC64_DECL(inc_return_unchecked);
15675 ATOMIC64_DECL(dec_return);
15676+ATOMIC64_DECL(dec_return_unchecked);
15677 ATOMIC64_DECL(dec_if_positive);
15678 ATOMIC64_DECL(inc_not_zero);
15679 ATOMIC64_DECL(add_unless);
15680@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15681 }
15682
15683 /**
15684+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15685+ * @v: pointer to type atomic64_unchecked_t
15686+ * @o: expected value
15687+ * @n: new value
15688+ *
15689+ * Atomically sets @v to @n if it was equal to @o and returns
15690+ * the old value.
15691+ */
15692+
15693+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15694+{
15695+ return cmpxchg64(&v->counter, o, n);
15696+}
15697+
15698+/**
15699 * atomic64_xchg - xchg atomic64 variable
15700 * @v: pointer to type atomic64_t
15701 * @n: value to assign
15702@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15703 }
15704
15705 /**
15706+ * atomic64_set_unchecked - set atomic64 variable
15707+ * @v: pointer to type atomic64_unchecked_t
15708+ * @i: value to assign
15709+ *
15710+ * Atomically sets the value of @v to @i.
15711+ */
15712+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15713+{
15714+ unsigned high = (unsigned)(i >> 32);
15715+ unsigned low = (unsigned)i;
15716+ alternative_atomic64(set, /* no output */,
15717+ "S" (v), "b" (low), "c" (high)
15718+ : "eax", "edx", "memory");
15719+}
15720+
15721+/**
15722 * atomic64_read - read atomic64 variable
15723 * @v: pointer to type atomic64_t
15724 *
15725@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15726 }
15727
15728 /**
15729+ * atomic64_read_unchecked - read atomic64 variable
15730+ * @v: pointer to type atomic64_unchecked_t
15731+ *
15732+ * Atomically reads the value of @v and returns it.
15733+ */
15734+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15735+{
15736+ long long r;
15737+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15738+ return r;
15739+ }
15740+
15741+/**
15742 * atomic64_add_return - add and return
15743 * @i: integer value to add
15744 * @v: pointer to type atomic64_t
15745@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15746 return i;
15747 }
15748
15749+/**
15750+ * atomic64_add_return_unchecked - add and return
15751+ * @i: integer value to add
15752+ * @v: pointer to type atomic64_unchecked_t
15753+ *
15754+ * Atomically adds @i to @v and returns @i + *@v
15755+ */
15756+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15757+{
15758+ alternative_atomic64(add_return_unchecked,
15759+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15760+ ASM_NO_INPUT_CLOBBER("memory"));
15761+ return i;
15762+}
15763+
15764 /*
15765 * Other variants with different arithmetic operators:
15766 */
15767@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15768 return a;
15769 }
15770
15771+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15772+{
15773+ long long a;
15774+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15775+ "S" (v) : "memory", "ecx");
15776+ return a;
15777+}
15778+
15779 static inline long long atomic64_dec_return(atomic64_t *v)
15780 {
15781 long long a;
15782@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15783 }
15784
15785 /**
15786+ * atomic64_add_unchecked - add integer to atomic64 variable
15787+ * @i: integer value to add
15788+ * @v: pointer to type atomic64_unchecked_t
15789+ *
15790+ * Atomically adds @i to @v.
15791+ */
15792+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15793+{
15794+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15795+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15796+ ASM_NO_INPUT_CLOBBER("memory"));
15797+ return i;
15798+}
15799+
15800+/**
15801 * atomic64_sub - subtract the atomic64 variable
15802 * @i: integer value to subtract
15803 * @v: pointer to type atomic64_t
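
The _unchecked twins added above exist because not every 64-bit counter is a
reference count: statistics and sequence counters are allowed to wrap, and
trapping on their overflow would be a false positive. Code audited to tolerate
wraparound is converted to atomic64_unchecked_t, while everything left as
atomic64_t gets the overflow checks. A usage sketch with a hypothetical
statistics counter:

/* Hypothetical example: byte counters may legitimately wrap, so they use
 * the _unchecked type and PAX_REFCOUNT leaves them uninstrumented. */
static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(long long len)
{
	atomic64_add_unchecked(len, &rx_bytes);
}
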
15804diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15805index f8d273e..02f39f3 100644
15806--- a/arch/x86/include/asm/atomic64_64.h
15807+++ b/arch/x86/include/asm/atomic64_64.h
15808@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15809 }
15810
15811 /**
15812+ * atomic64_read_unchecked - read atomic64 variable
15813+ * @v: pointer of type atomic64_unchecked_t
15814+ *
15815+ * Atomically reads the value of @v.
15816+ * Doesn't imply a read memory barrier.
15817+ */
15818+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15819+{
15820+ return ACCESS_ONCE((v)->counter);
15821+}
15822+
15823+/**
15824 * atomic64_set - set atomic64 variable
15825 * @v: pointer to type atomic64_t
15826 * @i: required value
15827@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15828 }
15829
15830 /**
15831+ * atomic64_set_unchecked - set atomic64 variable
15832+ * @v: pointer to type atomic64_unchecked_t
15833+ * @i: required value
15834+ *
15835+ * Atomically sets the value of @v to @i.
15836+ */
15837+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15838+{
15839+ v->counter = i;
15840+}
15841+
15842+/**
15843 * atomic64_add - add integer to atomic64 variable
15844 * @i: integer value to add
15845 * @v: pointer to type atomic64_t
15846@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15847 */
15848 static inline void atomic64_add(long i, atomic64_t *v)
15849 {
15850+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15851+
15852+#ifdef CONFIG_PAX_REFCOUNT
15853+ "jno 0f\n"
15854+ LOCK_PREFIX "subq %1,%0\n"
15855+ "int $4\n0:\n"
15856+ _ASM_EXTABLE(0b, 0b)
15857+#endif
15858+
15859+ : "=m" (v->counter)
15860+ : "er" (i), "m" (v->counter));
15861+}
15862+
15863+/**
15864+ * atomic64_add_unchecked - add integer to atomic64 variable
15865+ * @i: integer value to add
15866+ * @v: pointer to type atomic64_unchecked_t
15867+ *
15868+ * Atomically adds @i to @v.
15869+ */
15870+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15871+{
15872 asm volatile(LOCK_PREFIX "addq %1,%0"
15873 : "=m" (v->counter)
15874 : "er" (i), "m" (v->counter));
15875@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15876 */
15877 static inline void atomic64_sub(long i, atomic64_t *v)
15878 {
15879- asm volatile(LOCK_PREFIX "subq %1,%0"
15880+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15881+
15882+#ifdef CONFIG_PAX_REFCOUNT
15883+ "jno 0f\n"
15884+ LOCK_PREFIX "addq %1,%0\n"
15885+ "int $4\n0:\n"
15886+ _ASM_EXTABLE(0b, 0b)
15887+#endif
15888+
15889+ : "=m" (v->counter)
15890+ : "er" (i), "m" (v->counter));
15891+}
15892+
15893+/**
15894+ * atomic64_sub_unchecked - subtract the atomic64 variable
15895+ * @i: integer value to subtract
15896+ * @v: pointer to type atomic64_unchecked_t
15897+ *
15898+ * Atomically subtracts @i from @v.
15899+ */
15900+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15901+{
15902+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15903 : "=m" (v->counter)
15904 : "er" (i), "m" (v->counter));
15905 }
15906@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15907 */
15908 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15909 {
15910- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15911+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15912 }
15913
15914 /**
15915@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15916 */
15917 static inline void atomic64_inc(atomic64_t *v)
15918 {
15919+ asm volatile(LOCK_PREFIX "incq %0\n"
15920+
15921+#ifdef CONFIG_PAX_REFCOUNT
15922+ "jno 0f\n"
15923+ LOCK_PREFIX "decq %0\n"
15924+ "int $4\n0:\n"
15925+ _ASM_EXTABLE(0b, 0b)
15926+#endif
15927+
15928+ : "=m" (v->counter)
15929+ : "m" (v->counter));
15930+}
15931+
15932+/**
15933+ * atomic64_inc_unchecked - increment atomic64 variable
15934+ * @v: pointer to type atomic64_unchecked_t
15935+ *
15936+ * Atomically increments @v by 1.
15937+ */
15938+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15939+{
15940 asm volatile(LOCK_PREFIX "incq %0"
15941 : "=m" (v->counter)
15942 : "m" (v->counter));
15943@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15944 */
15945 static inline void atomic64_dec(atomic64_t *v)
15946 {
15947- asm volatile(LOCK_PREFIX "decq %0"
15948+ asm volatile(LOCK_PREFIX "decq %0\n"
15949+
15950+#ifdef CONFIG_PAX_REFCOUNT
15951+ "jno 0f\n"
15952+ LOCK_PREFIX "incq %0\n"
15953+ "int $4\n0:\n"
15954+ _ASM_EXTABLE(0b, 0b)
15955+#endif
15956+
15957+ : "=m" (v->counter)
15958+ : "m" (v->counter));
15959+}
15960+
15961+/**
15962+ * atomic64_dec_unchecked - decrement atomic64 variable
15963+ * @v: pointer to type atomic64_unchecked_t
15964+ *
15965+ * Atomically decrements @v by 1.
15966+ */
15967+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15968+{
15969+ asm volatile(LOCK_PREFIX "decq %0\n"
15970 : "=m" (v->counter)
15971 : "m" (v->counter));
15972 }
15973@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15974 */
15975 static inline int atomic64_dec_and_test(atomic64_t *v)
15976 {
15977- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15978+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15979 }
15980
15981 /**
15982@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15983 */
15984 static inline int atomic64_inc_and_test(atomic64_t *v)
15985 {
15986- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15987+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15988 }
15989
15990 /**
15991@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15992 */
15993 static inline int atomic64_add_negative(long i, atomic64_t *v)
15994 {
15995- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15996+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15997 }
15998
15999 /**
16000@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16001 */
16002 static inline long atomic64_add_return(long i, atomic64_t *v)
16003 {
16004+ return i + xadd_check_overflow(&v->counter, i);
16005+}
16006+
16007+/**
16008+ * atomic64_add_return_unchecked - add and return
16009+ * @i: integer value to add
16010+ * @v: pointer to type atomic64_unchecked_t
16011+ *
16012+ * Atomically adds @i to @v and returns @i + @v
16013+ */
16014+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16015+{
16016 return i + xadd(&v->counter, i);
16017 }
16018
16019@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16020 }
16021
16022 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16023+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16024+{
16025+ return atomic64_add_return_unchecked(1, v);
16026+}
16027 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16028
16029 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16030@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16031 return cmpxchg(&v->counter, old, new);
16032 }
16033
16034+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16035+{
16036+ return cmpxchg(&v->counter, old, new);
16037+}
16038+
16039 static inline long atomic64_xchg(atomic64_t *v, long new)
16040 {
16041 return xchg(&v->counter, new);
16042@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16043 */
16044 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16045 {
16046- long c, old;
16047+ long c, old, new;
16048 c = atomic64_read(v);
16049 for (;;) {
16050- if (unlikely(c == (u)))
16051+ if (unlikely(c == u))
16052 break;
16053- old = atomic64_cmpxchg((v), c, c + (a));
16054+
16055+ asm volatile("add %2,%0\n"
16056+
16057+#ifdef CONFIG_PAX_REFCOUNT
16058+ "jno 0f\n"
16059+ "sub %2,%0\n"
16060+ "int $4\n0:\n"
16061+ _ASM_EXTABLE(0b, 0b)
16062+#endif
16063+
16064+ : "=r" (new)
16065+ : "0" (c), "ir" (a));
16066+
16067+ old = atomic64_cmpxchg(v, c, new);
16068 if (likely(old == c))
16069 break;
16070 c = old;
16071 }
16072- return c != (u);
16073+ return c != u;
16074 }
16075
16076 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16077diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16078index 2ab1eb3..1e8cc5d 100644
16079--- a/arch/x86/include/asm/barrier.h
16080+++ b/arch/x86/include/asm/barrier.h
16081@@ -57,7 +57,7 @@
16082 do { \
16083 compiletime_assert_atomic_type(*p); \
16084 smp_mb(); \
16085- ACCESS_ONCE(*p) = (v); \
16086+ ACCESS_ONCE_RW(*p) = (v); \
16087 } while (0)
16088
16089 #define smp_load_acquire(p) \
16090@@ -74,7 +74,7 @@ do { \
16091 do { \
16092 compiletime_assert_atomic_type(*p); \
16093 barrier(); \
16094- ACCESS_ONCE(*p) = (v); \
16095+ ACCESS_ONCE_RW(*p) = (v); \
16096 } while (0)
16097
16098 #define smp_load_acquire(p) \
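
The smp_store_release() hunks switch to ACCESS_ONCE_RW() because grsecurity
const-qualifies plain ACCESS_ONCE(): reads keep working, but an unannotated
write through it becomes a compile-time error, which lets the constify
machinery catch stray stores. Roughly how the split looks in grsecurity's
compiler.h (a sketch, not quoted verbatim):

/* Read-only by default: the const qualifier rejects writes... */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
/* ...and writers must opt in explicitly. */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
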
16099diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16100index cfe3b95..d01b118 100644
16101--- a/arch/x86/include/asm/bitops.h
16102+++ b/arch/x86/include/asm/bitops.h
16103@@ -50,7 +50,7 @@
16104 * a mask operation on a byte.
16105 */
16106 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16107-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16108+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16109 #define CONST_MASK(nr) (1 << ((nr) & 7))
16110
16111 /**
16112@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16113 */
16114 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16115 {
16116- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16117+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16118 }
16119
16120 /**
16121@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16122 */
16123 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16124 {
16125- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16126+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16127 }
16128
16129 /**
16130@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16131 */
16132 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16133 {
16134- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16135+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16136 }
16137
16138 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16139@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16140 *
16141 * Undefined if no bit exists, so code should check against 0 first.
16142 */
16143-static inline unsigned long __ffs(unsigned long word)
16144+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16145 {
16146 asm("rep; bsf %1,%0"
16147 : "=r" (word)
16148@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16149 *
16150 * Undefined if no zero exists, so code should check against ~0UL first.
16151 */
16152-static inline unsigned long ffz(unsigned long word)
16153+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16154 {
16155 asm("rep; bsf %1,%0"
16156 : "=r" (word)
16157@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16158 *
16159 * Undefined if no set bit exists, so code should check against 0 first.
16160 */
16161-static inline unsigned long __fls(unsigned long word)
16162+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16163 {
16164 asm("bsr %1,%0"
16165 : "=r" (word)
16166@@ -434,7 +434,7 @@ static inline int ffs(int x)
16167 * set bit if value is nonzero. The last (most significant) bit is
16168 * at position 32.
16169 */
16170-static inline int fls(int x)
16171+static inline int __intentional_overflow(-1) fls(int x)
16172 {
16173 int r;
16174
16175@@ -476,7 +476,7 @@ static inline int fls(int x)
16176 * at position 64.
16177 */
16178 #ifdef CONFIG_X86_64
16179-static __always_inline int fls64(__u64 x)
16180+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16181 {
16182 int bitpos = -1;
16183 /*
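
The __intentional_overflow() annotations applied to __ffs(), ffz(), __fls(),
fls() and fls64() above are consumed by the size_overflow GCC plugin: argument
-1 marks the return value as deliberately allowed to wrap, so the plugin skips
instrumenting these bit-scanning helpers. The marker expands to a plugin
attribute, roughly like this (a sketch of the compiler.h plumbing, not quoted
verbatim):

#ifdef SIZE_OVERFLOW_PLUGIN
/* The plugin understands this attribute; -1 refers to the return value,
 * positive values to the corresponding parameter. */
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
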
16184diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16185index 4fa687a..60f2d39 100644
16186--- a/arch/x86/include/asm/boot.h
16187+++ b/arch/x86/include/asm/boot.h
16188@@ -6,10 +6,15 @@
16189 #include <uapi/asm/boot.h>
16190
16191 /* Physical address where kernel should be loaded. */
16192-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16193+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16194 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16195 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16196
16197+#ifndef __ASSEMBLY__
16198+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16199+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16200+#endif
16201+
16202 /* Minimum kernel alignment, as a power of two */
16203 #ifdef CONFIG_X86_64
16204 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16205diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16206index 48f99f1..d78ebf9 100644
16207--- a/arch/x86/include/asm/cache.h
16208+++ b/arch/x86/include/asm/cache.h
16209@@ -5,12 +5,13 @@
16210
16211 /* L1 cache line size */
16212 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16213-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16214+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16215
16216 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16217+#define __read_only __attribute__((__section__(".data..read_only")))
16218
16219 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16220-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16221+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16222
16223 #ifdef CONFIG_X86_VSMP
16224 #ifdef CONFIG_SMP
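
__read_only places an object in .data..read_only, which grsecurity maps
read-only once the kernel is up; this is how tables such as the GDT and IDT
become immutable to a runtime attacker. Legitimate writers must bracket
updates with pax_open_kernel()/pax_close_kernel(), the same pair visible in
the desc.h hunks further down. A usage sketch with a hypothetical variable:

/* Hypothetical tunable: lives in a read-only mapping after init. */
static unsigned long max_widgets __read_only = 128;

static void set_max_widgets(unsigned long n)
{
	pax_open_kernel();	/* briefly permit the store (e.g. via CR0.WP) */
	max_widgets = n;
	pax_close_kernel();
}
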
16225diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16226index 76659b6..72b8439 100644
16227--- a/arch/x86/include/asm/calling.h
16228+++ b/arch/x86/include/asm/calling.h
16229@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16230 #define RSP 152
16231 #define SS 160
16232
16233-#define ARGOFFSET R11
16234-#define SWFRAME ORIG_RAX
16235+#define ARGOFFSET R15
16236
16237 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16238- subq $9*8+\addskip, %rsp
16239- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16240- movq_cfi rdi, 8*8
16241- movq_cfi rsi, 7*8
16242- movq_cfi rdx, 6*8
16243+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16244+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16245+ movq_cfi rdi, RDI
16246+ movq_cfi rsi, RSI
16247+ movq_cfi rdx, RDX
16248
16249 .if \save_rcx
16250- movq_cfi rcx, 5*8
16251+ movq_cfi rcx, RCX
16252 .endif
16253
16254 .if \rax_enosys
16255- movq $-ENOSYS, 4*8(%rsp)
16256+ movq $-ENOSYS, RAX(%rsp)
16257 .else
16258- movq_cfi rax, 4*8
16259+ movq_cfi rax, RAX
16260 .endif
16261
16262 .if \save_r891011
16263- movq_cfi r8, 3*8
16264- movq_cfi r9, 2*8
16265- movq_cfi r10, 1*8
16266- movq_cfi r11, 0*8
16267+ movq_cfi r8, R8
16268+ movq_cfi r9, R9
16269+ movq_cfi r10, R10
16270+ movq_cfi r11, R11
16271 .endif
16272
16273+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16274+ movq_cfi r12, R12
16275+#endif
16276+
16277 .endm
16278
16279-#define ARG_SKIP (9*8)
16280+#define ARG_SKIP ORIG_RAX
16281
16282 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16283 rstor_r8910=1, rstor_rdx=1
16284+
16285+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16286+ movq_cfi_restore R12, r12
16287+#endif
16288+
16289 .if \rstor_r11
16290- movq_cfi_restore 0*8, r11
16291+ movq_cfi_restore R11, r11
16292 .endif
16293
16294 .if \rstor_r8910
16295- movq_cfi_restore 1*8, r10
16296- movq_cfi_restore 2*8, r9
16297- movq_cfi_restore 3*8, r8
16298+ movq_cfi_restore R10, r10
16299+ movq_cfi_restore R9, r9
16300+ movq_cfi_restore R8, r8
16301 .endif
16302
16303 .if \rstor_rax
16304- movq_cfi_restore 4*8, rax
16305+ movq_cfi_restore RAX, rax
16306 .endif
16307
16308 .if \rstor_rcx
16309- movq_cfi_restore 5*8, rcx
16310+ movq_cfi_restore RCX, rcx
16311 .endif
16312
16313 .if \rstor_rdx
16314- movq_cfi_restore 6*8, rdx
16315+ movq_cfi_restore RDX, rdx
16316 .endif
16317
16318- movq_cfi_restore 7*8, rsi
16319- movq_cfi_restore 8*8, rdi
16320+ movq_cfi_restore RSI, rsi
16321+ movq_cfi_restore RDI, rdi
16322
16323- .if ARG_SKIP+\addskip > 0
16324- addq $ARG_SKIP+\addskip, %rsp
16325- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16326+ .if ORIG_RAX+\addskip > 0
16327+ addq $ORIG_RAX+\addskip, %rsp
16328+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16329 .endif
16330 .endm
16331
16332- .macro LOAD_ARGS offset, skiprax=0
16333- movq \offset(%rsp), %r11
16334- movq \offset+8(%rsp), %r10
16335- movq \offset+16(%rsp), %r9
16336- movq \offset+24(%rsp), %r8
16337- movq \offset+40(%rsp), %rcx
16338- movq \offset+48(%rsp), %rdx
16339- movq \offset+56(%rsp), %rsi
16340- movq \offset+64(%rsp), %rdi
16341+ .macro LOAD_ARGS skiprax=0
16342+ movq R11(%rsp), %r11
16343+ movq R10(%rsp), %r10
16344+ movq R9(%rsp), %r9
16345+ movq R8(%rsp), %r8
16346+ movq RCX(%rsp), %rcx
16347+ movq RDX(%rsp), %rdx
16348+ movq RSI(%rsp), %rsi
16349+ movq RDI(%rsp), %rdi
16350 .if \skiprax
16351 .else
16352- movq \offset+72(%rsp), %rax
16353+ movq ORIG_RAX(%rsp), %rax
16354 .endif
16355 .endm
16356
16357-#define REST_SKIP (6*8)
16358-
16359 .macro SAVE_REST
16360- subq $REST_SKIP, %rsp
16361- CFI_ADJUST_CFA_OFFSET REST_SKIP
16362- movq_cfi rbx, 5*8
16363- movq_cfi rbp, 4*8
16364- movq_cfi r12, 3*8
16365- movq_cfi r13, 2*8
16366- movq_cfi r14, 1*8
16367- movq_cfi r15, 0*8
16368+ movq_cfi rbx, RBX
16369+ movq_cfi rbp, RBP
16370+
16371+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16372+ movq_cfi r12, R12
16373+#endif
16374+
16375+ movq_cfi r13, R13
16376+ movq_cfi r14, R14
16377+ movq_cfi r15, R15
16378 .endm
16379
16380 .macro RESTORE_REST
16381- movq_cfi_restore 0*8, r15
16382- movq_cfi_restore 1*8, r14
16383- movq_cfi_restore 2*8, r13
16384- movq_cfi_restore 3*8, r12
16385- movq_cfi_restore 4*8, rbp
16386- movq_cfi_restore 5*8, rbx
16387- addq $REST_SKIP, %rsp
16388- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16389+ movq_cfi_restore R15, r15
16390+ movq_cfi_restore R14, r14
16391+ movq_cfi_restore R13, r13
16392+
16393+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16394+ movq_cfi_restore R12, r12
16395+#endif
16396+
16397+ movq_cfi_restore RBP, rbp
16398+ movq_cfi_restore RBX, rbx
16399 .endm
16400
16401 .macro SAVE_ALL
16402diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16403index f50de69..2b0a458 100644
16404--- a/arch/x86/include/asm/checksum_32.h
16405+++ b/arch/x86/include/asm/checksum_32.h
16406@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16407 int len, __wsum sum,
16408 int *src_err_ptr, int *dst_err_ptr);
16409
16410+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16411+ int len, __wsum sum,
16412+ int *src_err_ptr, int *dst_err_ptr);
16413+
16414+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16415+ int len, __wsum sum,
16416+ int *src_err_ptr, int *dst_err_ptr);
16417+
16418 /*
16419 * Note: when you get a NULL pointer exception here this means someone
16420 * passed in an incorrect kernel address to one of these functions.
16421@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16422
16423 might_sleep();
16424 stac();
16425- ret = csum_partial_copy_generic((__force void *)src, dst,
16426+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16427 len, sum, err_ptr, NULL);
16428 clac();
16429
16430@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16431 might_sleep();
16432 if (access_ok(VERIFY_WRITE, dst, len)) {
16433 stac();
16434- ret = csum_partial_copy_generic(src, (__force void *)dst,
16435+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16436 len, sum, NULL, err_ptr);
16437 clac();
16438 return ret;
16439diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16440index 99c105d7..2f667ac 100644
16441--- a/arch/x86/include/asm/cmpxchg.h
16442+++ b/arch/x86/include/asm/cmpxchg.h
16443@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16444 __compiletime_error("Bad argument size for cmpxchg");
16445 extern void __xadd_wrong_size(void)
16446 __compiletime_error("Bad argument size for xadd");
16447+extern void __xadd_check_overflow_wrong_size(void)
16448+ __compiletime_error("Bad argument size for xadd_check_overflow");
16449 extern void __add_wrong_size(void)
16450 __compiletime_error("Bad argument size for add");
16451+extern void __add_check_overflow_wrong_size(void)
16452+ __compiletime_error("Bad argument size for add_check_overflow");
16453
16454 /*
16455  * Constants for operation sizes. On 32-bit, the 64-bit size is set to
16456@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16457 __ret; \
16458 })
16459
16460+#ifdef CONFIG_PAX_REFCOUNT
16461+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16462+ ({ \
16463+ __typeof__ (*(ptr)) __ret = (arg); \
16464+ switch (sizeof(*(ptr))) { \
16465+ case __X86_CASE_L: \
16466+ asm volatile (lock #op "l %0, %1\n" \
16467+ "jno 0f\n" \
16468+ "mov %0,%1\n" \
16469+ "int $4\n0:\n" \
16470+ _ASM_EXTABLE(0b, 0b) \
16471+ : "+r" (__ret), "+m" (*(ptr)) \
16472+ : : "memory", "cc"); \
16473+ break; \
16474+ case __X86_CASE_Q: \
16475+ asm volatile (lock #op "q %q0, %1\n" \
16476+ "jno 0f\n" \
16477+ "mov %0,%1\n" \
16478+ "int $4\n0:\n" \
16479+ _ASM_EXTABLE(0b, 0b) \
16480+ : "+r" (__ret), "+m" (*(ptr)) \
16481+ : : "memory", "cc"); \
16482+ break; \
16483+ default: \
16484+ __ ## op ## _check_overflow_wrong_size(); \
16485+ } \
16486+ __ret; \
16487+ })
16488+#else
16489+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16490+#endif
16491+
16492 /*
16493 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16494 * Since this is generally used to protect other memory information, we
16495@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16496 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16497 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16498
16499+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16500+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16501+
16502 #define __add(ptr, inc, lock) \
16503 ({ \
16504 __typeof__ (*(ptr)) __ret = (inc); \
16505diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16506index 59c6c40..5e0b22c 100644
16507--- a/arch/x86/include/asm/compat.h
16508+++ b/arch/x86/include/asm/compat.h
16509@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16510 typedef u32 compat_uint_t;
16511 typedef u32 compat_ulong_t;
16512 typedef u64 __attribute__((aligned(4))) compat_u64;
16513-typedef u32 compat_uptr_t;
16514+typedef u32 __user compat_uptr_t;
16515
16516 struct compat_timespec {
16517 compat_time_t tv_sec;
16518diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16519index aede2c3..40d7a8f 100644
16520--- a/arch/x86/include/asm/cpufeature.h
16521+++ b/arch/x86/include/asm/cpufeature.h
16522@@ -212,7 +212,7 @@
16523 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16524 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16525 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16526-
16527+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16528
16529 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16530 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16531@@ -220,7 +220,7 @@
16532 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16533 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16534 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16535-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16536+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16537 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16538 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16539 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16540@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16541 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16542 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16543 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16544+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16545
16546 #if __GNUC__ >= 4
16547 extern void warn_pre_alternatives(void);
16548@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16549
16550 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16551 t_warn:
16552- warn_pre_alternatives();
16553+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16554+ warn_pre_alternatives();
16555 return false;
16556 #endif
16557
16558@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16559 ".section .discard,\"aw\",@progbits\n"
16560 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16561 ".previous\n"
16562- ".section .altinstr_replacement,\"ax\"\n"
16563+ ".section .altinstr_replacement,\"a\"\n"
16564 "3: movb $1,%0\n"
16565 "4:\n"
16566 ".previous\n"
16567@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16568 " .byte 2b - 1b\n" /* src len */
16569 " .byte 4f - 3f\n" /* repl len */
16570 ".previous\n"
16571- ".section .altinstr_replacement,\"ax\"\n"
16572+ ".section .altinstr_replacement,\"a\"\n"
16573 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16574 "4:\n"
16575 ".previous\n"
16576@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16577 ".section .discard,\"aw\",@progbits\n"
16578 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16579 ".previous\n"
16580- ".section .altinstr_replacement,\"ax\"\n"
16581+ ".section .altinstr_replacement,\"a\"\n"
16582 "3: movb $0,%0\n"
16583 "4:\n"
16584 ".previous\n"
16585@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16586 ".section .discard,\"aw\",@progbits\n"
16587 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16588 ".previous\n"
16589- ".section .altinstr_replacement,\"ax\"\n"
16590+ ".section .altinstr_replacement,\"a\"\n"
16591 "5: movb $1,%0\n"
16592 "6:\n"
16593 ".previous\n"
16594diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16595index a94b82e..59ecefa 100644
16596--- a/arch/x86/include/asm/desc.h
16597+++ b/arch/x86/include/asm/desc.h
16598@@ -4,6 +4,7 @@
16599 #include <asm/desc_defs.h>
16600 #include <asm/ldt.h>
16601 #include <asm/mmu.h>
16602+#include <asm/pgtable.h>
16603
16604 #include <linux/smp.h>
16605 #include <linux/percpu.h>
16606@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16607
16608 desc->type = (info->read_exec_only ^ 1) << 1;
16609 desc->type |= info->contents << 2;
16610+ desc->type |= info->seg_not_present ^ 1;
16611
16612 desc->s = 1;
16613 desc->dpl = 0x3;
16614@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16615 }
16616
16617 extern struct desc_ptr idt_descr;
16618-extern gate_desc idt_table[];
16619-extern struct desc_ptr debug_idt_descr;
16620-extern gate_desc debug_idt_table[];
16621-
16622-struct gdt_page {
16623- struct desc_struct gdt[GDT_ENTRIES];
16624-} __attribute__((aligned(PAGE_SIZE)));
16625-
16626-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16627+extern gate_desc idt_table[IDT_ENTRIES];
16628+extern const struct desc_ptr debug_idt_descr;
16629+extern gate_desc debug_idt_table[IDT_ENTRIES];
16630
16631+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16632 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16633 {
16634- return per_cpu(gdt_page, cpu).gdt;
16635+ return cpu_gdt_table[cpu];
16636 }
16637
16638 #ifdef CONFIG_X86_64
16639@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16640 unsigned long base, unsigned dpl, unsigned flags,
16641 unsigned short seg)
16642 {
16643- gate->a = (seg << 16) | (base & 0xffff);
16644- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16645+ gate->gate.offset_low = base;
16646+ gate->gate.seg = seg;
16647+ gate->gate.reserved = 0;
16648+ gate->gate.type = type;
16649+ gate->gate.s = 0;
16650+ gate->gate.dpl = dpl;
16651+ gate->gate.p = 1;
16652+ gate->gate.offset_high = base >> 16;
16653 }
16654
16655 #endif
16656@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16657
16658 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16659 {
16660+ pax_open_kernel();
16661 memcpy(&idt[entry], gate, sizeof(*gate));
16662+ pax_close_kernel();
16663 }
16664
16665 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16666 {
16667+ pax_open_kernel();
16668 memcpy(&ldt[entry], desc, 8);
16669+ pax_close_kernel();
16670 }
16671
16672 static inline void
16673@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16674 default: size = sizeof(*gdt); break;
16675 }
16676
16677+ pax_open_kernel();
16678 memcpy(&gdt[entry], desc, size);
16679+ pax_close_kernel();
16680 }
16681
16682 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16683@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16684
16685 static inline void native_load_tr_desc(void)
16686 {
16687+ pax_open_kernel();
16688 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16689+ pax_close_kernel();
16690 }
16691
16692 static inline void native_load_gdt(const struct desc_ptr *dtr)
16693@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16694 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16695 unsigned int i;
16696
16697+ pax_open_kernel();
16698 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16699 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16700+ pax_close_kernel();
16701 }
16702
16703 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16704@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16705 preempt_enable();
16706 }
16707
16708-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16709+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16710 {
16711 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16712 }
16713@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16714 }
16715
16716 #ifdef CONFIG_X86_64
16717-static inline void set_nmi_gate(int gate, void *addr)
16718+static inline void set_nmi_gate(int gate, const void *addr)
16719 {
16720 gate_desc s;
16721
16722@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16723 #endif
16724
16725 #ifdef CONFIG_TRACING
16726-extern struct desc_ptr trace_idt_descr;
16727-extern gate_desc trace_idt_table[];
16728+extern const struct desc_ptr trace_idt_descr;
16729+extern gate_desc trace_idt_table[IDT_ENTRIES];
16730 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16731 {
16732 write_idt_entry(trace_idt_table, entry, gate);
16733 }
16734
16735-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16736+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16737 unsigned dpl, unsigned ist, unsigned seg)
16738 {
16739 gate_desc s;
16740@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16741 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16742 #endif
16743
16744-static inline void _set_gate(int gate, unsigned type, void *addr,
16745+static inline void _set_gate(int gate, unsigned type, const void *addr,
16746 unsigned dpl, unsigned ist, unsigned seg)
16747 {
16748 gate_desc s;
16749@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16750 #define set_intr_gate(n, addr) \
16751 do { \
16752 BUG_ON((unsigned)n > 0xFF); \
16753- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16754+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16755 __KERNEL_CS); \
16756- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16757+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16758 0, 0, __KERNEL_CS); \
16759 } while (0)
16760
16761@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16762 /*
16763 * This routine sets up an interrupt gate at directory privilege level 3.
16764 */
16765-static inline void set_system_intr_gate(unsigned int n, void *addr)
16766+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16767 {
16768 BUG_ON((unsigned)n > 0xFF);
16769 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16770 }
16771
16772-static inline void set_system_trap_gate(unsigned int n, void *addr)
16773+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16774 {
16775 BUG_ON((unsigned)n > 0xFF);
16776 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16777 }
16778
16779-static inline void set_trap_gate(unsigned int n, void *addr)
16780+static inline void set_trap_gate(unsigned int n, const void *addr)
16781 {
16782 BUG_ON((unsigned)n > 0xFF);
16783 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16784@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16785 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16786 {
16787 BUG_ON((unsigned)n > 0xFF);
16788- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16789+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16790 }
16791
16792-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16793+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16794 {
16795 BUG_ON((unsigned)n > 0xFF);
16796 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16797 }
16798
16799-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16800+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16801 {
16802 BUG_ON((unsigned)n > 0xFF);
16803 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16804@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16805 else
16806 load_idt((const struct desc_ptr *)&idt_descr);
16807 }
16808+
16809+#ifdef CONFIG_X86_32
16810+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16811+{
16812+ struct desc_struct d;
16813+
16814+ if (likely(limit))
16815+ limit = (limit - 1UL) >> PAGE_SHIFT;
16816+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16817+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16818+}
16819+#endif
16820+
16821 #endif /* _ASM_X86_DESC_H */
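
The set_user_cs() helper added at the end of this hunk builds a flat ring-3 code segment via pack_descriptor(&d, base, limit, 0xFB, 0xC); the two constants are the access byte and the flags nibble of an x86 segment descriptor. A standalone sketch (plain C; the field names mirror desc_struct, and the nibble split is how pack_descriptor consumes its type/flags arguments) that decodes them:

#include <stdio.h>

/* Mirror of the desc_struct access/flags bits (see desc_defs.h below). */
struct seg_attr {
	unsigned type : 4;   /* 0xB = code, execute/read, accessed */
	unsigned s    : 1;   /* 1 = code/data (not system) */
	unsigned dpl  : 2;   /* 3 = user mode */
	unsigned p    : 1;   /* present */
	unsigned avl  : 1;
	unsigned l    : 1;   /* 0 = not 64-bit */
	unsigned d    : 1;   /* 1 = 32-bit default operand size */
	unsigned g    : 1;   /* 1 = limit counted in 4K pages */
};

int main(void)
{
	unsigned char type = 0xFB, flags = 0xC;
	struct seg_attr a = {
		.type = type & 0xf,        .s = (type >> 4) & 1,
		.dpl  = (type >> 5) & 3,   .p = (type >> 7) & 1,
		.avl  = flags & 1,         .l = (flags >> 1) & 1,
		.d    = (flags >> 2) & 1,  .g = (flags >> 3) & 1,
	};
	printf("type=%x s=%u dpl=%u p=%u avl=%u l=%u d=%u g=%u\n",
	       a.type, a.s, a.dpl, a.p, a.avl, a.l, a.d, a.g);
	return 0;	/* prints type=b s=1 dpl=3 p=1 avl=0 l=0 d=1 g=1 */
}

Decoded: a present, DPL-3, 32-bit, page-granular, execute/read code segment — the default user %cs, just with an adjustable base and limit, which is what the SEGMEXEC/PAGEEXEC machinery later in this patch needs.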
16822diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16823index 278441f..b95a174 100644
16824--- a/arch/x86/include/asm/desc_defs.h
16825+++ b/arch/x86/include/asm/desc_defs.h
16826@@ -31,6 +31,12 @@ struct desc_struct {
16827 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16828 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16829 };
16830+ struct {
16831+ u16 offset_low;
16832+ u16 seg;
16833+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16834+ unsigned offset_high: 16;
16835+ } gate;
16836 };
16837 } __attribute__((packed));
16838
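
The new gate member gives the pack_gate() rewrite above a field-by-field view of what the old a/b word arithmetic open-coded. A standalone check (assumes GCC bitfield allocation on little-endian x86, which is what both code paths target) that the two packings produce identical bytes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gate32 {                        /* the bitfield view added above */
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
	unsigned long base = 0xc1234567UL;
	unsigned short seg = 0x60;         /* e.g. __KERNEL_CS */
	unsigned type = 0xE, dpl = 0;      /* GATE_INTERRUPT, ring 0 */

	/* Old open-coded packing from pack_gate(): */
	uint32_t words[2] = {
		(uint32_t)(seg << 16) | (base & 0xffff),
		(uint32_t)((base & 0xffff0000)
			   | (((0x80 | type | (dpl << 5)) & 0xff) << 8)),
	};

	/* New field-by-field packing, as rewritten in the hunk: */
	struct gate32 g = {
		.offset_low = base, .seg = seg, .reserved = 0,
		.type = type, .s = 0, .dpl = dpl, .p = 1,
		.offset_high = base >> 16,
	};

	assert(sizeof(g) == 8 && memcmp(&g, words, 8) == 0);
	puts("bitfield packing matches the old a/b word arithmetic");
	return 0;
}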
16839diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16840index ced283a..ffe04cc 100644
16841--- a/arch/x86/include/asm/div64.h
16842+++ b/arch/x86/include/asm/div64.h
16843@@ -39,7 +39,7 @@
16844 __mod; \
16845 })
16846
16847-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16848+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16849 {
16850 union {
16851 u64 v64;
16852diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16853index ca3347a..1a5082a 100644
16854--- a/arch/x86/include/asm/elf.h
16855+++ b/arch/x86/include/asm/elf.h
16856@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16857
16858 #include <asm/vdso.h>
16859
16860-#ifdef CONFIG_X86_64
16861-extern unsigned int vdso64_enabled;
16862-#endif
16863 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16864 extern unsigned int vdso32_enabled;
16865 #endif
16866@@ -249,7 +246,25 @@ extern int force_personality32;
16867 the loader. We need to make sure that it is out of the way of the program
16868 that it will "exec", and that there is sufficient room for the brk. */
16869
16870+#ifdef CONFIG_PAX_SEGMEXEC
16871+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16872+#else
16873 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16874+#endif
16875+
16876+#ifdef CONFIG_PAX_ASLR
16877+#ifdef CONFIG_X86_32
16878+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16879+
16880+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16881+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16882+#else
16883+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16884+
16885+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16886+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16887+#endif
16888+#endif
16889
16890 /* This yields a mask that user programs can use to figure out what
16891 instruction set this CPU supports. This could be done in user space,
16892@@ -298,17 +313,13 @@ do { \
16893
16894 #define ARCH_DLINFO \
16895 do { \
16896- if (vdso64_enabled) \
16897- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16898- (unsigned long __force)current->mm->context.vdso); \
16899+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16900 } while (0)
16901
16902 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16903 #define ARCH_DLINFO_X32 \
16904 do { \
16905- if (vdso64_enabled) \
16906- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16907- (unsigned long __force)current->mm->context.vdso); \
16908+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16909 } while (0)
16910
16911 #define AT_SYSINFO 32
16912@@ -323,10 +334,10 @@ else \
16913
16914 #endif /* !CONFIG_X86_32 */
16915
16916-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16917+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16918
16919 #define VDSO_ENTRY \
16920- ((unsigned long)current->mm->context.vdso + \
16921+ (current->mm->context.vdso + \
16922 selected_vdso32->sym___kernel_vsyscall)
16923
16924 struct linux_binprm;
16925@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16926 int uses_interp);
16927 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16928
16929-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16930-#define arch_randomize_brk arch_randomize_brk
16931-
16932 /*
16933 * True on X86_32 or when emulating IA32 on X86_64
16934 */
16935diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16936index 77a99ac..39ff7f5 100644
16937--- a/arch/x86/include/asm/emergency-restart.h
16938+++ b/arch/x86/include/asm/emergency-restart.h
16939@@ -1,6 +1,6 @@
16940 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16941 #define _ASM_X86_EMERGENCY_RESTART_H
16942
16943-extern void machine_emergency_restart(void);
16944+extern void machine_emergency_restart(void) __noreturn;
16945
16946 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16947diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16948index 1c7eefe..d0e4702 100644
16949--- a/arch/x86/include/asm/floppy.h
16950+++ b/arch/x86/include/asm/floppy.h
16951@@ -229,18 +229,18 @@ static struct fd_routine_l {
16952 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16953 } fd_routine[] = {
16954 {
16955- request_dma,
16956- free_dma,
16957- get_dma_residue,
16958- dma_mem_alloc,
16959- hard_dma_setup
16960+ ._request_dma = request_dma,
16961+ ._free_dma = free_dma,
16962+ ._get_dma_residue = get_dma_residue,
16963+ ._dma_mem_alloc = dma_mem_alloc,
16964+ ._dma_setup = hard_dma_setup
16965 },
16966 {
16967- vdma_request_dma,
16968- vdma_nop,
16969- vdma_get_dma_residue,
16970- vdma_mem_alloc,
16971- vdma_dma_setup
16972+ ._request_dma = vdma_request_dma,
16973+ ._free_dma = vdma_nop,
16974+ ._get_dma_residue = vdma_get_dma_residue,
16975+ ._dma_mem_alloc = vdma_mem_alloc,
16976+ ._dma_setup = vdma_dma_setup
16977 }
16978 };
16979
16980diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16981index f895358..800c60d 100644
16982--- a/arch/x86/include/asm/fpu-internal.h
16983+++ b/arch/x86/include/asm/fpu-internal.h
16984@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16985 #define user_insn(insn, output, input...) \
16986 ({ \
16987 int err; \
16988+ pax_open_userland(); \
16989 asm volatile(ASM_STAC "\n" \
16990- "1:" #insn "\n\t" \
16991+ "1:" \
16992+ __copyuser_seg \
16993+ #insn "\n\t" \
16994 "2: " ASM_CLAC "\n" \
16995 ".section .fixup,\"ax\"\n" \
16996 "3: movl $-1,%[err]\n" \
16997@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16998 _ASM_EXTABLE(1b, 3b) \
16999 : [err] "=r" (err), output \
17000 : "0"(0), input); \
17001+ pax_close_userland(); \
17002 err; \
17003 })
17004
17005@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17006 "fnclex\n\t"
17007 "emms\n\t"
17008 "fildl %P[addr]" /* set F?P to defined value */
17009- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17010+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17011 }
17012
17013 return fpu_restore_checking(&tsk->thread.fpu);
17014diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17015index b4c1f54..e290c08 100644
17016--- a/arch/x86/include/asm/futex.h
17017+++ b/arch/x86/include/asm/futex.h
17018@@ -12,6 +12,7 @@
17019 #include <asm/smap.h>
17020
17021 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17022+ typecheck(u32 __user *, uaddr); \
17023 asm volatile("\t" ASM_STAC "\n" \
17024 "1:\t" insn "\n" \
17025 "2:\t" ASM_CLAC "\n" \
17026@@ -20,15 +21,16 @@
17027 "\tjmp\t2b\n" \
17028 "\t.previous\n" \
17029 _ASM_EXTABLE(1b, 3b) \
17030- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17031+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17032 : "i" (-EFAULT), "0" (oparg), "1" (0))
17033
17034 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17035+ typecheck(u32 __user *, uaddr); \
17036 asm volatile("\t" ASM_STAC "\n" \
17037 "1:\tmovl %2, %0\n" \
17038 "\tmovl\t%0, %3\n" \
17039 "\t" insn "\n" \
17040- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17041+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17042 "\tjnz\t1b\n" \
17043 "3:\t" ASM_CLAC "\n" \
17044 "\t.section .fixup,\"ax\"\n" \
17045@@ -38,7 +40,7 @@
17046 _ASM_EXTABLE(1b, 4b) \
17047 _ASM_EXTABLE(2b, 4b) \
17048 : "=&a" (oldval), "=&r" (ret), \
17049- "+m" (*uaddr), "=&r" (tem) \
17050+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17051 : "r" (oparg), "i" (-EFAULT), "1" (0))
17052
17053 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17054@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17055
17056 pagefault_disable();
17057
17058+ pax_open_userland();
17059 switch (op) {
17060 case FUTEX_OP_SET:
17061- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17062+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17063 break;
17064 case FUTEX_OP_ADD:
17065- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17066+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17067 uaddr, oparg);
17068 break;
17069 case FUTEX_OP_OR:
17070@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17071 default:
17072 ret = -ENOSYS;
17073 }
17074+ pax_close_userland();
17075
17076 pagefault_enable();
17077
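
The typecheck(u32 __user *, uaddr) lines added to both futex macros turn a wrongly typed uaddr into a compile-time diagnostic before it ever reaches the asm constraints. A freestanding sketch of the trick behind the kernel's typecheck() (include/linux/typecheck.h, reproduced here from memory — comparing the addresses of two dummies forces the compiler to verify the pointee types match):

#define typecheck(type, x) \
({	type __dummy; \
	__typeof__(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

int main(void)
{
	unsigned int *p = 0;
	typecheck(unsigned int *, p);     /* fine */
	/* typecheck(unsigned long *, p);    warns: comparison of
	   distinct pointer types lacks a cast */
	return 0;
}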
17078diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17079index 9662290..49ca5e5 100644
17080--- a/arch/x86/include/asm/hw_irq.h
17081+++ b/arch/x86/include/asm/hw_irq.h
17082@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17083 #endif /* CONFIG_X86_LOCAL_APIC */
17084
17085 /* Statistics */
17086-extern atomic_t irq_err_count;
17087-extern atomic_t irq_mis_count;
17088+extern atomic_unchecked_t irq_err_count;
17089+extern atomic_unchecked_t irq_mis_count;
17090
17091 /* EISA */
17092 extern void eisa_set_level_irq(unsigned int irq);
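
irq_err_count and irq_mis_count only feed the ERR/MIS lines of /proc/interrupts, so wrapping is harmless; under PAX_REFCOUNT the plain atomic_t operations trap on signed overflow, which is why pure statistics move to atomic_unchecked_t. A minimal sketch of that split (illustrative, not the exact grsecurity helpers):

#include <stdio.h>

/* atomic_t gains an overflow trap under PAX_REFCOUNT, so counters that
 * are allowed to wrap move to an *_unchecked_t type whose helpers keep
 * the plain, wrapping semantics. */
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_t err_count = { 0 };

	atomic_inc_unchecked(&err_count);    /* statistics may wrap */
	printf("%d\n", err_count.counter);   /* 1 */
	return 0;
}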
17093diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17094index ccffa53..3c90c87 100644
17095--- a/arch/x86/include/asm/i8259.h
17096+++ b/arch/x86/include/asm/i8259.h
17097@@ -62,7 +62,7 @@ struct legacy_pic {
17098 void (*init)(int auto_eoi);
17099 int (*irq_pending)(unsigned int irq);
17100 void (*make_irq)(unsigned int irq);
17101-};
17102+} __do_const;
17103
17104 extern struct legacy_pic *legacy_pic;
17105 extern struct legacy_pic null_legacy_pic;
17106diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17107index 34a5b93..27e40a6 100644
17108--- a/arch/x86/include/asm/io.h
17109+++ b/arch/x86/include/asm/io.h
17110@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17111 "m" (*(volatile type __force *)addr) barrier); }
17112
17113 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17114-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17115-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17116+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17117+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17118
17119 build_mmio_read(__readb, "b", unsigned char, "=q", )
17120-build_mmio_read(__readw, "w", unsigned short, "=r", )
17121-build_mmio_read(__readl, "l", unsigned int, "=r", )
17122+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17123+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17124
17125 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17126 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17127@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17128 * this function
17129 */
17130
17131-static inline phys_addr_t virt_to_phys(volatile void *address)
17132+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17133 {
17134 return __pa(address);
17135 }
17136@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17137 return ioremap_nocache(offset, size);
17138 }
17139
17140-extern void iounmap(volatile void __iomem *addr);
17141+extern void iounmap(const volatile void __iomem *addr);
17142
17143 extern void set_iounmap_nonlazy(void);
17144
17145@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17146
17147 #include <linux/vmalloc.h>
17148
17149+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17150+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17151+{
17152+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17153+}
17154+
17155+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17156+{
17157+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17158+}
17159+
17160 /*
17161 * Convert a virtual cached pointer to an uncached pointer
17162 */
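
readw/readl, virt_to_phys and friends gain __intentional_overflow(-1), an annotation consumed by the size_overflow GCC plugin to whitelist arithmetic that may legitimately wrap (-1 marks the return value). A hedged sketch of how such an annotation degrades to a no-op without the plugin — the exact macro guard and config names in the full patch may differ:

#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)   /* must vanish for stock compilers */
#endif

static unsigned long __intentional_overflow(-1)
demo_virt_to_phys(unsigned long va)
{
	return va - 0xc0000000UL;   /* illustrative PAGE_OFFSET; may wrap */
}

int main(void)
{
	printf("%#lx\n", demo_virt_to_phys(0xc1000000UL));  /* 0x1000000 */
	return 0;
}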
17163diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17164index 0a8b519..80e7d5b 100644
17165--- a/arch/x86/include/asm/irqflags.h
17166+++ b/arch/x86/include/asm/irqflags.h
17167@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17168 sti; \
17169 sysexit
17170
17171+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17172+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17173+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17174+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17175+
17176 #else
17177 #define INTERRUPT_RETURN iret
17178 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17179diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17180index 4421b5d..8543006 100644
17181--- a/arch/x86/include/asm/kprobes.h
17182+++ b/arch/x86/include/asm/kprobes.h
17183@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17184 #define RELATIVEJUMP_SIZE 5
17185 #define RELATIVECALL_OPCODE 0xe8
17186 #define RELATIVE_ADDR_SIZE 4
17187-#define MAX_STACK_SIZE 64
17188-#define MIN_STACK_SIZE(ADDR) \
17189- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17190- THREAD_SIZE - (unsigned long)(ADDR))) \
17191- ? (MAX_STACK_SIZE) \
17192- : (((unsigned long)current_thread_info()) + \
17193- THREAD_SIZE - (unsigned long)(ADDR)))
17194+#define MAX_STACK_SIZE 64UL
17195+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17196
17197 #define flush_insn_slot(p) do { } while (0)
17198
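
The MIN_STACK_SIZE() rewrite replaces the open-coded ternary with min(), bounding the copy by the distance from the interrupted stack pointer to the top of the kernel stack (thread.sp0). The constant becomes 64UL because the kernel's min() refuses mixed types and sp0 - (unsigned long)(ADDR) is unsigned long. A standalone model of the clamping:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL
#define min(a, b) ((a) < (b) ? (a) : (b))

/* sp0 is the top of the kernel stack; addr the interrupted stack
 * pointer.  The copy stops at whichever comes first: 64 bytes or the
 * end of the stack. */
static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
	return min(MAX_STACK_SIZE, sp0 - addr);
}

int main(void)
{
	printf("%lu\n", min_stack_size(0x1000, 0x0fd0));  /* 48: near top */
	printf("%lu\n", min_stack_size(0x1000, 0x0e00));  /* 64: capped  */
	return 0;
}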
17199diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17200index d89c6b8..e711c69 100644
17201--- a/arch/x86/include/asm/kvm_host.h
17202+++ b/arch/x86/include/asm/kvm_host.h
17203@@ -51,7 +51,7 @@
17204 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17205
17206 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17207-#define CR3_PCID_INVD (1UL << 63)
17208+#define CR3_PCID_INVD (1ULL << 63)
17209 #define CR4_RESERVED_BITS \
17210 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17211 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
17212diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17213index 4ad6560..75c7bdd 100644
17214--- a/arch/x86/include/asm/local.h
17215+++ b/arch/x86/include/asm/local.h
17216@@ -10,33 +10,97 @@ typedef struct {
17217 atomic_long_t a;
17218 } local_t;
17219
17220+typedef struct {
17221+ atomic_long_unchecked_t a;
17222+} local_unchecked_t;
17223+
17224 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17225
17226 #define local_read(l) atomic_long_read(&(l)->a)
17227+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17228 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17229+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17230
17231 static inline void local_inc(local_t *l)
17232 {
17233- asm volatile(_ASM_INC "%0"
17234+ asm volatile(_ASM_INC "%0\n"
17235+
17236+#ifdef CONFIG_PAX_REFCOUNT
17237+ "jno 0f\n"
17238+ _ASM_DEC "%0\n"
17239+ "int $4\n0:\n"
17240+ _ASM_EXTABLE(0b, 0b)
17241+#endif
17242+
17243+ : "+m" (l->a.counter));
17244+}
17245+
17246+static inline void local_inc_unchecked(local_unchecked_t *l)
17247+{
17248+ asm volatile(_ASM_INC "%0\n"
17249 : "+m" (l->a.counter));
17250 }
17251
17252 static inline void local_dec(local_t *l)
17253 {
17254- asm volatile(_ASM_DEC "%0"
17255+ asm volatile(_ASM_DEC "%0\n"
17256+
17257+#ifdef CONFIG_PAX_REFCOUNT
17258+ "jno 0f\n"
17259+ _ASM_INC "%0\n"
17260+ "int $4\n0:\n"
17261+ _ASM_EXTABLE(0b, 0b)
17262+#endif
17263+
17264+ : "+m" (l->a.counter));
17265+}
17266+
17267+static inline void local_dec_unchecked(local_unchecked_t *l)
17268+{
17269+ asm volatile(_ASM_DEC "%0\n"
17270 : "+m" (l->a.counter));
17271 }
17272
17273 static inline void local_add(long i, local_t *l)
17274 {
17275- asm volatile(_ASM_ADD "%1,%0"
17276+ asm volatile(_ASM_ADD "%1,%0\n"
17277+
17278+#ifdef CONFIG_PAX_REFCOUNT
17279+ "jno 0f\n"
17280+ _ASM_SUB "%1,%0\n"
17281+ "int $4\n0:\n"
17282+ _ASM_EXTABLE(0b, 0b)
17283+#endif
17284+
17285+ : "+m" (l->a.counter)
17286+ : "ir" (i));
17287+}
17288+
17289+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17290+{
17291+ asm volatile(_ASM_ADD "%1,%0\n"
17292 : "+m" (l->a.counter)
17293 : "ir" (i));
17294 }
17295
17296 static inline void local_sub(long i, local_t *l)
17297 {
17298- asm volatile(_ASM_SUB "%1,%0"
17299+ asm volatile(_ASM_SUB "%1,%0\n"
17300+
17301+#ifdef CONFIG_PAX_REFCOUNT
17302+ "jno 0f\n"
17303+ _ASM_ADD "%1,%0\n"
17304+ "int $4\n0:\n"
17305+ _ASM_EXTABLE(0b, 0b)
17306+#endif
17307+
17308+ : "+m" (l->a.counter)
17309+ : "ir" (i));
17310+}
17311+
17312+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17313+{
17314+ asm volatile(_ASM_SUB "%1,%0\n"
17315 : "+m" (l->a.counter)
17316 : "ir" (i));
17317 }
17318@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17319 */
17320 static inline int local_sub_and_test(long i, local_t *l)
17321 {
17322- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17323+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17324 }
17325
17326 /**
17327@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17328 */
17329 static inline int local_dec_and_test(local_t *l)
17330 {
17331- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17332+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17333 }
17334
17335 /**
17336@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17337 */
17338 static inline int local_inc_and_test(local_t *l)
17339 {
17340- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17341+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17342 }
17343
17344 /**
17345@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17346 */
17347 static inline int local_add_negative(long i, local_t *l)
17348 {
17349- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17350+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17351 }
17352
17353 /**
17354@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17355 static inline long local_add_return(long i, local_t *l)
17356 {
17357 long __i = i;
17358+ asm volatile(_ASM_XADD "%0, %1\n"
17359+
17360+#ifdef CONFIG_PAX_REFCOUNT
17361+ "jno 0f\n"
17362+ _ASM_MOV "%0,%1\n"
17363+ "int $4\n0:\n"
17364+ _ASM_EXTABLE(0b, 0b)
17365+#endif
17366+
17367+ : "+r" (i), "+m" (l->a.counter)
17368+ : : "memory");
17369+ return i + __i;
17370+}
17371+
17372+/**
17373+ * local_add_return_unchecked - add and return
17374+ * @i: integer value to add
17375+ * @l: pointer to type local_unchecked_t
17376+ *
17377+ * Atomically adds @i to @l and returns @i + @l
17378+ */
17379+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17380+{
17381+ long __i = i;
17382 asm volatile(_ASM_XADD "%0, %1;"
17383 : "+r" (i), "+m" (l->a.counter)
17384 : : "memory");
17385@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17386
17387 #define local_cmpxchg(l, o, n) \
17388 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17389+#define local_cmpxchg_unchecked(l, o, n) \
17390+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17391 /* Always has a lock prefix */
17392 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17393
17394diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17395new file mode 100644
17396index 0000000..2bfd3ba
17397--- /dev/null
17398+++ b/arch/x86/include/asm/mman.h
17399@@ -0,0 +1,15 @@
17400+#ifndef _X86_MMAN_H
17401+#define _X86_MMAN_H
17402+
17403+#include <uapi/asm/mman.h>
17404+
17405+#ifdef __KERNEL__
17406+#ifndef __ASSEMBLY__
17407+#ifdef CONFIG_X86_32
17408+#define arch_mmap_check i386_mmap_check
17409+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17410+#endif
17411+#endif
17412+#endif
17413+
17414+#endif /* X86_MMAN_H */
17415diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17416index 876e74e..e20bfb1 100644
17417--- a/arch/x86/include/asm/mmu.h
17418+++ b/arch/x86/include/asm/mmu.h
17419@@ -9,7 +9,7 @@
17420 * we put the segment information here.
17421 */
17422 typedef struct {
17423- void *ldt;
17424+ struct desc_struct *ldt;
17425 int size;
17426
17427 #ifdef CONFIG_X86_64
17428@@ -18,7 +18,19 @@ typedef struct {
17429 #endif
17430
17431 struct mutex lock;
17432- void __user *vdso;
17433+ unsigned long vdso;
17434+
17435+#ifdef CONFIG_X86_32
17436+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17437+ unsigned long user_cs_base;
17438+ unsigned long user_cs_limit;
17439+
17440+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17441+ cpumask_t cpu_user_cs_mask;
17442+#endif
17443+
17444+#endif
17445+#endif
17446 } mm_context_t;
17447
17448 #ifdef CONFIG_SMP
17449diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17450index 4b75d59..8ffacb6 100644
17451--- a/arch/x86/include/asm/mmu_context.h
17452+++ b/arch/x86/include/asm/mmu_context.h
17453@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17454
17455 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17456 {
17457+
17458+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17459+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17460+ unsigned int i;
17461+ pgd_t *pgd;
17462+
17463+ pax_open_kernel();
17464+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17465+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17466+ set_pgd_batched(pgd+i, native_make_pgd(0));
17467+ pax_close_kernel();
17468+ }
17469+#endif
17470+
17471 #ifdef CONFIG_SMP
17472 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17473 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17474@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17475 struct task_struct *tsk)
17476 {
17477 unsigned cpu = smp_processor_id();
17478+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17479+ int tlbstate = TLBSTATE_OK;
17480+#endif
17481
17482 if (likely(prev != next)) {
17483 #ifdef CONFIG_SMP
17484+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17485+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17486+#endif
17487 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17488 this_cpu_write(cpu_tlbstate.active_mm, next);
17489 #endif
17490 cpumask_set_cpu(cpu, mm_cpumask(next));
17491
17492 /* Re-load page tables */
17493+#ifdef CONFIG_PAX_PER_CPU_PGD
17494+ pax_open_kernel();
17495+
17496+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17497+ if (static_cpu_has(X86_FEATURE_PCID))
17498+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17499+ else
17500+#endif
17501+
17502+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17503+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17504+ pax_close_kernel();
17505+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17506+
17507+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17508+ if (static_cpu_has(X86_FEATURE_PCID)) {
17509+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17510+ u64 descriptor[2];
17511+ descriptor[0] = PCID_USER;
17512+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17513+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17514+ descriptor[0] = PCID_KERNEL;
17515+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17516+ }
17517+ } else {
17518+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17519+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17520+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17521+ else
17522+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17523+ }
17524+ } else
17525+#endif
17526+
17527+ load_cr3(get_cpu_pgd(cpu, kernel));
17528+#else
17529 load_cr3(next->pgd);
17530+#endif
17531 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17532
17533 /* Stop flush ipis for the previous mm */
17534@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17535 */
17536 if (unlikely(prev->context.ldt != next->context.ldt))
17537 load_LDT_nolock(&next->context);
17538+
17539+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17540+ if (!(__supported_pte_mask & _PAGE_NX)) {
17541+ smp_mb__before_atomic();
17542+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17543+ smp_mb__after_atomic();
17544+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17545+ }
17546+#endif
17547+
17548+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17549+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17550+ prev->context.user_cs_limit != next->context.user_cs_limit))
17551+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17552+#ifdef CONFIG_SMP
17553+ else if (unlikely(tlbstate != TLBSTATE_OK))
17554+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17555+#endif
17556+#endif
17557+
17558 }
17559+ else {
17560+
17561+#ifdef CONFIG_PAX_PER_CPU_PGD
17562+ pax_open_kernel();
17563+
17564+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17565+ if (static_cpu_has(X86_FEATURE_PCID))
17566+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17567+ else
17568+#endif
17569+
17570+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17571+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17572+ pax_close_kernel();
17573+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17574+
17575+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17576+ if (static_cpu_has(X86_FEATURE_PCID)) {
17577+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17578+ u64 descriptor[2];
17579+ descriptor[0] = PCID_USER;
17580+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17581+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17582+ descriptor[0] = PCID_KERNEL;
17583+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17584+ }
17585+ } else {
17586+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17587+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17588+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17589+ else
17590+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17591+ }
17592+ } else
17593+#endif
17594+
17595+ load_cr3(get_cpu_pgd(cpu, kernel));
17596+#endif
17597+
17598 #ifdef CONFIG_SMP
17599- else {
17600 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17601 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17602
17603@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17604 * tlb flush IPI delivery. We must reload CR3
17605 * to make sure to use no freed page tables.
17606 */
17607+
17608+#ifndef CONFIG_PAX_PER_CPU_PGD
17609 load_cr3(next->pgd);
17610 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17611+#endif
17612+
17613 load_LDT_nolock(&next->context);
17614+
17615+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17616+ if (!(__supported_pte_mask & _PAGE_NX))
17617+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17618+#endif
17619+
17620+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17621+#ifdef CONFIG_PAX_PAGEEXEC
17622+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17623+#endif
17624+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17625+#endif
17626+
17627 }
17628+#endif
17629 }
17630-#endif
17631 }
17632
17633 #define activate_mm(prev, next) \
17634diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17635index e3b7819..b257c64 100644
17636--- a/arch/x86/include/asm/module.h
17637+++ b/arch/x86/include/asm/module.h
17638@@ -5,6 +5,7 @@
17639
17640 #ifdef CONFIG_X86_64
17641 /* X86_64 does not define MODULE_PROC_FAMILY */
17642+#define MODULE_PROC_FAMILY ""
17643 #elif defined CONFIG_M486
17644 #define MODULE_PROC_FAMILY "486 "
17645 #elif defined CONFIG_M586
17646@@ -57,8 +58,20 @@
17647 #error unknown processor family
17648 #endif
17649
17650-#ifdef CONFIG_X86_32
17651-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17652+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17653+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17654+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17655+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17656+#else
17657+#define MODULE_PAX_KERNEXEC ""
17658 #endif
17659
17660+#ifdef CONFIG_PAX_MEMORY_UDEREF
17661+#define MODULE_PAX_UDEREF "UDEREF "
17662+#else
17663+#define MODULE_PAX_UDEREF ""
17664+#endif
17665+
17666+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17667+
17668 #endif /* _ASM_X86_MODULE_H */
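
Folding the KERNEXEC/UDEREF markers into MODULE_ARCH_VERMAGIC makes a module built without the matching PaX options fail the vermagic check at load time instead of misbehaving later. What the preprocessor pastes together for one example configuration (x86_64, so an empty MODULE_PROC_FAMILY, with KERNEXEC_PLUGIN_METHOD_BTS and UDEREF enabled):

#include <stdio.h>

#define MODULE_PROC_FAMILY ""
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
#define MODULE_PAX_UDEREF "UDEREF "
#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	/* prints "KERNEXEC_BTS UDEREF " */
	printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}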
17669diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17670index 5f2fc44..106caa6 100644
17671--- a/arch/x86/include/asm/nmi.h
17672+++ b/arch/x86/include/asm/nmi.h
17673@@ -36,26 +36,35 @@ enum {
17674
17675 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17676
17677+struct nmiaction;
17678+
17679+struct nmiwork {
17680+ const struct nmiaction *action;
17681+ u64 max_duration;
17682+ struct irq_work irq_work;
17683+};
17684+
17685 struct nmiaction {
17686 struct list_head list;
17687 nmi_handler_t handler;
17688- u64 max_duration;
17689- struct irq_work irq_work;
17690 unsigned long flags;
17691 const char *name;
17692-};
17693+ struct nmiwork *work;
17694+} __do_const;
17695
17696 #define register_nmi_handler(t, fn, fg, n, init...) \
17697 ({ \
17698- static struct nmiaction init fn##_na = { \
17699+ static struct nmiwork fn##_nw; \
17700+ static const struct nmiaction init fn##_na = { \
17701 .handler = (fn), \
17702 .name = (n), \
17703 .flags = (fg), \
17704+ .work = &fn##_nw, \
17705 }; \
17706 __register_nmi_handler((t), &fn##_na); \
17707 })
17708
17709-int __register_nmi_handler(unsigned int, struct nmiaction *);
17710+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17711
17712 void unregister_nmi_handler(unsigned int, const char *);
17713
17714diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17715index 802dde3..9183e68 100644
17716--- a/arch/x86/include/asm/page.h
17717+++ b/arch/x86/include/asm/page.h
17718@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17719 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17720
17721 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17722+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17723
17724 #define __boot_va(x) __va(x)
17725 #define __boot_pa(x) __pa(x)
17726@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17727 * virt_to_page(kaddr) returns a valid pointer if and only if
17728 * virt_addr_valid(kaddr) returns true.
17729 */
17730-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17731 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17732 extern bool __virt_addr_valid(unsigned long kaddr);
17733 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17734
17735+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17736+#define virt_to_page(kaddr) \
17737+ ({ \
17738+ const void *__kaddr = (const void *)(kaddr); \
17739+ BUG_ON(!virt_addr_valid(__kaddr)); \
17740+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17741+ })
17742+#else
17743+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17744+#endif
17745+
17746 #endif /* __ASSEMBLY__ */
17747
17748 #include <asm-generic/memory_model.h>
17749diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17750index b3bebf9..e1f5d95 100644
17751--- a/arch/x86/include/asm/page_64.h
17752+++ b/arch/x86/include/asm/page_64.h
17753@@ -7,9 +7,9 @@
17754
17755 /* duplicated to the one in bootmem.h */
17756 extern unsigned long max_pfn;
17757-extern unsigned long phys_base;
17758+extern const unsigned long phys_base;
17759
17760-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17761+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17762 {
17763 unsigned long y = x - __START_KERNEL_map;
17764
17765diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17766index 32444ae..1a1624b 100644
17767--- a/arch/x86/include/asm/paravirt.h
17768+++ b/arch/x86/include/asm/paravirt.h
17769@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17770 return (pmd_t) { ret };
17771 }
17772
17773-static inline pmdval_t pmd_val(pmd_t pmd)
17774+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17775 {
17776 pmdval_t ret;
17777
17778@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17779 val);
17780 }
17781
17782+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17783+{
17784+ pgdval_t val = native_pgd_val(pgd);
17785+
17786+ if (sizeof(pgdval_t) > sizeof(long))
17787+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17788+ val, (u64)val >> 32);
17789+ else
17790+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17791+ val);
17792+}
17793+
17794 static inline void pgd_clear(pgd_t *pgdp)
17795 {
17796 set_pgd(pgdp, __pgd(0));
17797@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17798 pv_mmu_ops.set_fixmap(idx, phys, flags);
17799 }
17800
17801+#ifdef CONFIG_PAX_KERNEXEC
17802+static inline unsigned long pax_open_kernel(void)
17803+{
17804+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17805+}
17806+
17807+static inline unsigned long pax_close_kernel(void)
17808+{
17809+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17810+}
17811+#else
17812+static inline unsigned long pax_open_kernel(void) { return 0; }
17813+static inline unsigned long pax_close_kernel(void) { return 0; }
17814+#endif
17815+
17816 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17817
17818 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17819@@ -906,7 +933,7 @@ extern void default_banner(void);
17820
17821 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17822 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17823-#define PARA_INDIRECT(addr) *%cs:addr
17824+#define PARA_INDIRECT(addr) *%ss:addr
17825 #endif
17826
17827 #define INTERRUPT_RETURN \
17828@@ -981,6 +1008,21 @@ extern void default_banner(void);
17829 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17830 CLBR_NONE, \
17831 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17832+
17833+#define GET_CR0_INTO_RDI \
17834+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17835+ mov %rax,%rdi
17836+
17837+#define SET_RDI_INTO_CR0 \
17838+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17839+
17840+#define GET_CR3_INTO_RDI \
17841+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17842+ mov %rax,%rdi
17843+
17844+#define SET_RDI_INTO_CR3 \
17845+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17846+
17847 #endif /* CONFIG_X86_32 */
17848
17849 #endif /* __ASSEMBLY__ */
17850diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17851index 7549b8b..f0edfda 100644
17852--- a/arch/x86/include/asm/paravirt_types.h
17853+++ b/arch/x86/include/asm/paravirt_types.h
17854@@ -84,7 +84,7 @@ struct pv_init_ops {
17855 */
17856 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17857 unsigned long addr, unsigned len);
17858-};
17859+} __no_const __no_randomize_layout;
17860
17861
17862 struct pv_lazy_ops {
17863@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17864 void (*enter)(void);
17865 void (*leave)(void);
17866 void (*flush)(void);
17867-};
17868+} __no_randomize_layout;
17869
17870 struct pv_time_ops {
17871 unsigned long long (*sched_clock)(void);
17872 unsigned long long (*steal_clock)(int cpu);
17873 unsigned long (*get_tsc_khz)(void);
17874-};
17875+} __no_const __no_randomize_layout;
17876
17877 struct pv_cpu_ops {
17878 /* hooks for various privileged instructions */
17879@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17880
17881 void (*start_context_switch)(struct task_struct *prev);
17882 void (*end_context_switch)(struct task_struct *next);
17883-};
17884+} __no_const __no_randomize_layout;
17885
17886 struct pv_irq_ops {
17887 /*
17888@@ -215,7 +215,7 @@ struct pv_irq_ops {
17889 #ifdef CONFIG_X86_64
17890 void (*adjust_exception_frame)(void);
17891 #endif
17892-};
17893+} __no_randomize_layout;
17894
17895 struct pv_apic_ops {
17896 #ifdef CONFIG_X86_LOCAL_APIC
17897@@ -223,7 +223,7 @@ struct pv_apic_ops {
17898 unsigned long start_eip,
17899 unsigned long start_esp);
17900 #endif
17901-};
17902+} __no_const __no_randomize_layout;
17903
17904 struct pv_mmu_ops {
17905 unsigned long (*read_cr2)(void);
17906@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17907 struct paravirt_callee_save make_pud;
17908
17909 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17910+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17911 #endif /* PAGETABLE_LEVELS == 4 */
17912 #endif /* PAGETABLE_LEVELS >= 3 */
17913
17914@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17915 an mfn. We can tell which is which from the index. */
17916 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17917 phys_addr_t phys, pgprot_t flags);
17918-};
17919+
17920+#ifdef CONFIG_PAX_KERNEXEC
17921+ unsigned long (*pax_open_kernel)(void);
17922+ unsigned long (*pax_close_kernel)(void);
17923+#endif
17924+
17925+} __no_randomize_layout;
17926
17927 struct arch_spinlock;
17928 #ifdef CONFIG_SMP
17929@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17930 struct pv_lock_ops {
17931 struct paravirt_callee_save lock_spinning;
17932 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17933-};
17934+} __no_randomize_layout;
17935
17936 /* This contains all the paravirt structures: we get a convenient
17937 * number for each function using the offset which we use to indicate
17938- * what to patch. */
17939+ * what to patch.
17940+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17941+ */
17942+
17943 struct paravirt_patch_template {
17944 struct pv_init_ops pv_init_ops;
17945 struct pv_time_ops pv_time_ops;
17946@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17947 struct pv_apic_ops pv_apic_ops;
17948 struct pv_mmu_ops pv_mmu_ops;
17949 struct pv_lock_ops pv_lock_ops;
17950-};
17951+} __no_randomize_layout;
17952
17953 extern struct pv_info pv_info;
17954 extern struct pv_init_ops pv_init_ops;
17955diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17956index c4412e9..90e88c5 100644
17957--- a/arch/x86/include/asm/pgalloc.h
17958+++ b/arch/x86/include/asm/pgalloc.h
17959@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17960 pmd_t *pmd, pte_t *pte)
17961 {
17962 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17963+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17964+}
17965+
17966+static inline void pmd_populate_user(struct mm_struct *mm,
17967+ pmd_t *pmd, pte_t *pte)
17968+{
17969+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17970 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17971 }
17972
17973@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17974
17975 #ifdef CONFIG_X86_PAE
17976 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17977+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17978+{
17979+ pud_populate(mm, pudp, pmd);
17980+}
17981 #else /* !CONFIG_X86_PAE */
17982 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17983 {
17984 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17985 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17986 }
17987+
17988+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17989+{
17990+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17991+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17992+}
17993 #endif /* CONFIG_X86_PAE */
17994
17995 #if PAGETABLE_LEVELS > 3
17996@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17997 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17998 }
17999
18000+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18001+{
18002+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18003+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18004+}
18005+
18006 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18007 {
18008 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18009diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18010index 206a87f..1623b06 100644
18011--- a/arch/x86/include/asm/pgtable-2level.h
18012+++ b/arch/x86/include/asm/pgtable-2level.h
18013@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18014
18015 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18016 {
18017+ pax_open_kernel();
18018 *pmdp = pmd;
18019+ pax_close_kernel();
18020 }
18021
18022 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18023diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18024index 81bb91b..9392125 100644
18025--- a/arch/x86/include/asm/pgtable-3level.h
18026+++ b/arch/x86/include/asm/pgtable-3level.h
18027@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18028
18029 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18030 {
18031+ pax_open_kernel();
18032 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18033+ pax_close_kernel();
18034 }
18035
18036 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18037 {
18038+ pax_open_kernel();
18039 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18040+ pax_close_kernel();
18041 }
18042
18043 /*
18044diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18045index e8a5454..1539359 100644
18046--- a/arch/x86/include/asm/pgtable.h
18047+++ b/arch/x86/include/asm/pgtable.h
18048@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18049
18050 #ifndef __PAGETABLE_PUD_FOLDED
18051 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18052+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18053 #define pgd_clear(pgd) native_pgd_clear(pgd)
18054 #endif
18055
18056@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18057
18058 #define arch_end_context_switch(prev) do {} while(0)
18059
18060+#define pax_open_kernel() native_pax_open_kernel()
18061+#define pax_close_kernel() native_pax_close_kernel()
18062 #endif /* CONFIG_PARAVIRT */
18063
18064+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18065+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18066+
18067+#ifdef CONFIG_PAX_KERNEXEC
18068+static inline unsigned long native_pax_open_kernel(void)
18069+{
18070+ unsigned long cr0;
18071+
18072+ preempt_disable();
18073+ barrier();
18074+ cr0 = read_cr0() ^ X86_CR0_WP;
18075+ BUG_ON(cr0 & X86_CR0_WP);
18076+ write_cr0(cr0);
18077+ barrier();
18078+ return cr0 ^ X86_CR0_WP;
18079+}
18080+
18081+static inline unsigned long native_pax_close_kernel(void)
18082+{
18083+ unsigned long cr0;
18084+
18085+ barrier();
18086+ cr0 = read_cr0() ^ X86_CR0_WP;
18087+ BUG_ON(!(cr0 & X86_CR0_WP));
18088+ write_cr0(cr0);
18089+ barrier();
18090+ preempt_enable_no_resched();
18091+ return cr0 ^ X86_CR0_WP;
18092+}
18093+#else
18094+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18095+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18096+#endif
18097+
18098 /*
18099 * The following only work if pte_present() is true.
18100 * Undefined behaviour if not..
18101 */
18102+static inline int pte_user(pte_t pte)
18103+{
18104+ return pte_val(pte) & _PAGE_USER;
18105+}
18106+
18107 static inline int pte_dirty(pte_t pte)
18108 {
18109 return pte_flags(pte) & _PAGE_DIRTY;
18110@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18111 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18112 }
18113
18114+static inline unsigned long pgd_pfn(pgd_t pgd)
18115+{
18116+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18117+}
18118+
18119 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18120
18121 static inline int pmd_large(pmd_t pte)
18122@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18123 return pte_clear_flags(pte, _PAGE_RW);
18124 }
18125
18126+static inline pte_t pte_mkread(pte_t pte)
18127+{
18128+ return __pte(pte_val(pte) | _PAGE_USER);
18129+}
18130+
18131 static inline pte_t pte_mkexec(pte_t pte)
18132 {
18133- return pte_clear_flags(pte, _PAGE_NX);
18134+#ifdef CONFIG_X86_PAE
18135+ if (__supported_pte_mask & _PAGE_NX)
18136+ return pte_clear_flags(pte, _PAGE_NX);
18137+ else
18138+#endif
18139+ return pte_set_flags(pte, _PAGE_USER);
18140+}
18141+
18142+static inline pte_t pte_exprotect(pte_t pte)
18143+{
18144+#ifdef CONFIG_X86_PAE
18145+ if (__supported_pte_mask & _PAGE_NX)
18146+ return pte_set_flags(pte, _PAGE_NX);
18147+ else
18148+#endif
18149+ return pte_clear_flags(pte, _PAGE_USER);
18150 }
18151
18152 static inline pte_t pte_mkdirty(pte_t pte)
18153@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18154 #endif
18155
18156 #ifndef __ASSEMBLY__
18157+
18158+#ifdef CONFIG_PAX_PER_CPU_PGD
18159+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18160+enum cpu_pgd_type {kernel = 0, user = 1};
18161+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18162+{
18163+ return cpu_pgd[cpu][type];
18164+}
18165+#endif
18166+
18167 #include <linux/mm_types.h>
18168 #include <linux/mmdebug.h>
18169 #include <linux/log2.h>
18170@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18171 * Currently stuck as a macro due to indirect forward reference to
18172 * linux/mmzone.h's __section_mem_map_addr() definition:
18173 */
18174-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18175+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18176
18177 /* Find an entry in the second-level page table.. */
18178 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18179@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18180 * Currently stuck as a macro due to indirect forward reference to
18181 * linux/mmzone.h's __section_mem_map_addr() definition:
18182 */
18183-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18184+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18185
18186 /* to find an entry in a page-table-directory. */
18187 static inline unsigned long pud_index(unsigned long address)
18188@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18189
18190 static inline int pgd_bad(pgd_t pgd)
18191 {
18192- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18193+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18194 }
18195
18196 static inline int pgd_none(pgd_t pgd)
18197@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18198 * pgd_offset() returns a (pgd_t *)
18199 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18200 */
18201-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18202+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18203+
18204+#ifdef CONFIG_PAX_PER_CPU_PGD
18205+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18206+#endif
18207+
18208 /*
18209 * a shortcut which implies the use of the kernel's pgd, instead
18210 * of a process's
18211@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18212 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18213 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18214
18215+#ifdef CONFIG_X86_32
18216+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18217+#else
18218+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18219+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18220+
18221+#ifdef CONFIG_PAX_MEMORY_UDEREF
18222+#ifdef __ASSEMBLY__
18223+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18224+#else
18225+extern unsigned long pax_user_shadow_base;
18226+extern pgdval_t clone_pgd_mask;
18227+#endif
18228+#endif
18229+
18230+#endif
18231+
18232 #ifndef __ASSEMBLY__
18233
18234 extern int direct_gbpages;
18235@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18236 * dst and src can be on the same page, but the range must not overlap,
18237 * and must not cross a page boundary.
18238 */
18239-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18240+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18241 {
18242- memcpy(dst, src, count * sizeof(pgd_t));
18243+ pax_open_kernel();
18244+ while (count--)
18245+ *dst++ = *src++;
18246+ pax_close_kernel();
18247 }
18248
18249+#ifdef CONFIG_PAX_PER_CPU_PGD
18250+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18251+#endif
18252+
18253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18254+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18255+#else
18256+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18257+#endif
18258+
18259 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18260 static inline int page_level_shift(enum pg_level level)
18261 {
18262diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18263index b6c0b40..3535d47 100644
18264--- a/arch/x86/include/asm/pgtable_32.h
18265+++ b/arch/x86/include/asm/pgtable_32.h
18266@@ -25,9 +25,6 @@
18267 struct mm_struct;
18268 struct vm_area_struct;
18269
18270-extern pgd_t swapper_pg_dir[1024];
18271-extern pgd_t initial_page_table[1024];
18272-
18273 static inline void pgtable_cache_init(void) { }
18274 static inline void check_pgt_cache(void) { }
18275 void paging_init(void);
18276@@ -45,6 +42,12 @@ void paging_init(void);
18277 # include <asm/pgtable-2level.h>
18278 #endif
18279
18280+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18281+extern pgd_t initial_page_table[PTRS_PER_PGD];
18282+#ifdef CONFIG_X86_PAE
18283+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18284+#endif
18285+
18286 #if defined(CONFIG_HIGHPTE)
18287 #define pte_offset_map(dir, address) \
18288 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18289@@ -59,12 +62,17 @@ void paging_init(void);
18290 /* Clear a kernel PTE and flush it from the TLB */
18291 #define kpte_clear_flush(ptep, vaddr) \
18292 do { \
18293+ pax_open_kernel(); \
18294 pte_clear(&init_mm, (vaddr), (ptep)); \
18295+ pax_close_kernel(); \
18296 __flush_tlb_one((vaddr)); \
18297 } while (0)
18298
18299 #endif /* !__ASSEMBLY__ */
18300
18301+#define HAVE_ARCH_UNMAPPED_AREA
18302+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18303+
18304 /*
18305 * kern_addr_valid() is (1) for FLATMEM and (0) for
18306 * SPARSEMEM and DISCONTIGMEM
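
Note on the pgtable_32.h hunk above: the swapper_pg_dir/initial_page_table declarations move below the pgtable-2level.h/pgtable-3level.h include so they can be sized with PTRS_PER_PGD (which is 1024 only in the non-PAE case) instead of a hard-coded 1024, and a PAE-only swapper_pm_dir array is exposed — presumably for the static PMDs this patch wires into the kernel page directory. kpte_clear_flush() gains the same pax_open_kernel()/pax_close_kernel() bracket as clone_pgd_range(), since under KERNEXEC the kernel PTE being cleared may live in a read-only page.
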
18307diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18308index 9fb2f2b..b04b4bf 100644
18309--- a/arch/x86/include/asm/pgtable_32_types.h
18310+++ b/arch/x86/include/asm/pgtable_32_types.h
18311@@ -8,7 +8,7 @@
18312 */
18313 #ifdef CONFIG_X86_PAE
18314 # include <asm/pgtable-3level_types.h>
18315-# define PMD_SIZE (1UL << PMD_SHIFT)
18316+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18317 # define PMD_MASK (~(PMD_SIZE - 1))
18318 #else
18319 # include <asm/pgtable-2level_types.h>
18320@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18321 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18322 #endif
18323
18324+#ifdef CONFIG_PAX_KERNEXEC
18325+#ifndef __ASSEMBLY__
18326+extern unsigned char MODULES_EXEC_VADDR[];
18327+extern unsigned char MODULES_EXEC_END[];
18328+#endif
18329+#include <asm/boot.h>
18330+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18331+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18332+#else
18333+#define ktla_ktva(addr) (addr)
18334+#define ktva_ktla(addr) (addr)
18335+#endif
18336+
18337 #define MODULES_VADDR VMALLOC_START
18338 #define MODULES_END VMALLOC_END
18339 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
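
Note on ktla_ktva()/ktva_ktla() above: on i386 with KERNEXEC the kernel text is reachable through two addresses, and these macros convert between them by a constant offset. What the macros literally compute, with illustrative values (assuming the common defaults LOAD_PHYSICAL_ADDR = 0x1000000 and PAGE_OFFSET = 0xc0000000):

	/* sketch: plain 32-bit wraparound arithmetic
	 *
	 *   ktla_ktva(0xc1000000) = 0xc1000000 + 0x1000000 + 0xc0000000
	 *                         = 0x82000000              (mod 2^32)
	 *   ktva_ktla(0x82000000) = 0x82000000 - 0x1000000 - 0xc0000000
	 *                         = 0xc1000000              (mod 2^32)
	 *
	 * without KERNEXEC both macros are the identity, as defined above */
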
18340diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18341index 4572b2f..4430113 100644
18342--- a/arch/x86/include/asm/pgtable_64.h
18343+++ b/arch/x86/include/asm/pgtable_64.h
18344@@ -16,11 +16,16 @@
18345
18346 extern pud_t level3_kernel_pgt[512];
18347 extern pud_t level3_ident_pgt[512];
18348+extern pud_t level3_vmalloc_start_pgt[512];
18349+extern pud_t level3_vmalloc_end_pgt[512];
18350+extern pud_t level3_vmemmap_pgt[512];
18351+extern pud_t level2_vmemmap_pgt[512];
18352 extern pmd_t level2_kernel_pgt[512];
18353 extern pmd_t level2_fixmap_pgt[512];
18354-extern pmd_t level2_ident_pgt[512];
18355+extern pmd_t level2_ident_pgt[512*2];
18356 extern pte_t level1_fixmap_pgt[512];
18357-extern pgd_t init_level4_pgt[];
18358+extern pte_t level1_vsyscall_pgt[512];
18359+extern pgd_t init_level4_pgt[512];
18360
18361 #define swapper_pg_dir init_level4_pgt
18362
18363@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18364
18365 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18366 {
18367+ pax_open_kernel();
18368 *pmdp = pmd;
18369+ pax_close_kernel();
18370 }
18371
18372 static inline void native_pmd_clear(pmd_t *pmd)
18373@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18374
18375 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18376 {
18377+ pax_open_kernel();
18378 *pudp = pud;
18379+ pax_close_kernel();
18380 }
18381
18382 static inline void native_pud_clear(pud_t *pud)
18383@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18384
18385 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18386 {
18387+ pax_open_kernel();
18388+ *pgdp = pgd;
18389+ pax_close_kernel();
18390+}
18391+
18392+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18393+{
18394 *pgdp = pgd;
18395 }
18396
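
Note on native_set_pgd() above: every PGD write now opens and closes the kernel, which is correct but costly in loops. native_set_pgd_batched() is the unbracketed variant for callers that hold the window open themselves — e.g. (illustrative caller, not from the patch):

	/* sketch: write many PGD entries under a single open/close window */
	static void set_pgds_batched_sketch(pgd_t *dst, const pgd_t *src, int count)
	{
		pax_open_kernel();
		while (count--)
			native_set_pgd_batched(dst++, *src++);
		pax_close_kernel();
	}
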
18397diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18398index 602b602..acb53ed 100644
18399--- a/arch/x86/include/asm/pgtable_64_types.h
18400+++ b/arch/x86/include/asm/pgtable_64_types.h
18401@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18402 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18403 #define MODULES_END _AC(0xffffffffff000000, UL)
18404 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18405+#define MODULES_EXEC_VADDR MODULES_VADDR
18406+#define MODULES_EXEC_END MODULES_END
18407 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18408 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18409 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18410 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18411
18412+#define ktla_ktva(addr) (addr)
18413+#define ktva_ktla(addr) (addr)
18414+
18415 #define EARLY_DYNAMIC_PAGE_TABLES 64
18416
18417 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18418diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18419index 25bcd4a..bf3f815 100644
18420--- a/arch/x86/include/asm/pgtable_types.h
18421+++ b/arch/x86/include/asm/pgtable_types.h
18422@@ -110,8 +110,10 @@
18423
18424 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18425 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18426-#else
18427+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18428 #define _PAGE_NX (_AT(pteval_t, 0))
18429+#else
18430+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18431 #endif
18432
18433 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18434@@ -167,6 +169,9 @@ enum page_cache_mode {
18435 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18436 _PAGE_ACCESSED)
18437
18438+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18439+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18440+
18441 #define __PAGE_KERNEL_EXEC \
18442 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18443 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18444@@ -174,7 +179,7 @@ enum page_cache_mode {
18445 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18446 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18447 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18448-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18449+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18450 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18451 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18452 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18453@@ -220,7 +225,7 @@ enum page_cache_mode {
18454 #ifdef CONFIG_X86_64
18455 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18456 #else
18457-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18458+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18459 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18460 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18461 #endif
18462@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18463 {
18464 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18465 }
18466+#endif
18467
18468+#if PAGETABLE_LEVELS == 3
18469+#include <asm-generic/pgtable-nopud.h>
18470+#endif
18471+
18472+#if PAGETABLE_LEVELS == 2
18473+#include <asm-generic/pgtable-nopmd.h>
18474+#endif
18475+
18476+#ifndef __ASSEMBLY__
18477 #if PAGETABLE_LEVELS > 3
18478 typedef struct { pudval_t pud; } pud_t;
18479
18480@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18481 return pud.pud;
18482 }
18483 #else
18484-#include <asm-generic/pgtable-nopud.h>
18485-
18486 static inline pudval_t native_pud_val(pud_t pud)
18487 {
18488 return native_pgd_val(pud.pgd);
18489@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18490 return pmd.pmd;
18491 }
18492 #else
18493-#include <asm-generic/pgtable-nopmd.h>
18494-
18495 static inline pmdval_t native_pmd_val(pmd_t pmd)
18496 {
18497 return native_pgd_val(pmd.pud.pgd);
18498@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18499
18500 extern pteval_t __supported_pte_mask;
18501 extern void set_nx(void);
18502-extern int nx_enabled;
18503
18504 #define pgprot_writecombine pgprot_writecombine
18505 extern pgprot_t pgprot_writecombine(pgprot_t prot);
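
Note on the _PAGE_NX hunk above: 32-bit non-PAE kernels have no hardware NX bit, so the patch repurposes the software _PAGE_BIT_HIDDEN bit as a "soft NX" marker (except under kmemcheck/soft-dirty, which already claim that bit and keep _PAGE_NX as zero). The bit records non-executable intent in the PTE; on such CPUs the actual enforcement comes from PaX segmentation (SEGMEXEC) rather than the MMU. A sketch of the kind of test this enables (hypothetical helper, not from the patch):

	/* sketch: with soft NX this test compiles and works on plain 32-bit too */
	static inline int pte_exec_sketch(pte_t pte)
	{
		return !(pte_val(pte) & _PAGE_NX);
	}
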
18506diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18507index 8f327184..368fb29 100644
18508--- a/arch/x86/include/asm/preempt.h
18509+++ b/arch/x86/include/asm/preempt.h
18510@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18511 */
18512 static __always_inline bool __preempt_count_dec_and_test(void)
18513 {
18514- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18515+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18516 }
18517
18518 /*
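
Note on the __preempt_count_dec_and_test() hunk above: GEN_UNARY_RMWcc() grows a second argument, the "anti-op" ("incl" undoing "decl"), used by the PAX_REFCOUNT machinery to roll back a wrapping update before reporting it. The mechanism itself is defined in the rmwcc.h hunk further down, where a sketch of the generated code is given.
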
18519diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18520index a092a0c..8e9640b 100644
18521--- a/arch/x86/include/asm/processor.h
18522+++ b/arch/x86/include/asm/processor.h
18523@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18524 /* Index into per_cpu list: */
18525 u16 cpu_index;
18526 u32 microcode;
18527-};
18528+} __randomize_layout;
18529
18530 #define X86_VENDOR_INTEL 0
18531 #define X86_VENDOR_CYRIX 1
18532@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18533 : "memory");
18534 }
18535
18536+/* invpcid (%rdx),%rax */
18537+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18538+
18539+#define INVPCID_SINGLE_ADDRESS 0UL
18540+#define INVPCID_SINGLE_CONTEXT 1UL
18541+#define INVPCID_ALL_GLOBAL 2UL
18542+#define INVPCID_ALL_NONGLOBAL 3UL
18543+
18544+#define PCID_KERNEL 0UL
18545+#define PCID_USER 1UL
18546+#define PCID_NOFLUSH (1UL << 63)
18547+
18548 static inline void load_cr3(pgd_t *pgdir)
18549 {
18550- write_cr3(__pa(pgdir));
18551+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18552 }
18553
18554 #ifdef CONFIG_X86_32
18555@@ -282,7 +294,7 @@ struct tss_struct {
18556
18557 } ____cacheline_aligned;
18558
18559-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18560+extern struct tss_struct init_tss[NR_CPUS];
18561
18562 /*
18563 * Save the original ist values for checking stack pointers during debugging
18564@@ -479,6 +491,7 @@ struct thread_struct {
18565 unsigned short ds;
18566 unsigned short fsindex;
18567 unsigned short gsindex;
18568+ unsigned short ss;
18569 #endif
18570 #ifdef CONFIG_X86_32
18571 unsigned long ip;
18572@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18573 extern unsigned long mmu_cr4_features;
18574 extern u32 *trampoline_cr4_features;
18575
18576-static inline void set_in_cr4(unsigned long mask)
18577-{
18578- unsigned long cr4;
18579-
18580- mmu_cr4_features |= mask;
18581- if (trampoline_cr4_features)
18582- *trampoline_cr4_features = mmu_cr4_features;
18583- cr4 = read_cr4();
18584- cr4 |= mask;
18585- write_cr4(cr4);
18586-}
18587-
18588-static inline void clear_in_cr4(unsigned long mask)
18589-{
18590- unsigned long cr4;
18591-
18592- mmu_cr4_features &= ~mask;
18593- if (trampoline_cr4_features)
18594- *trampoline_cr4_features = mmu_cr4_features;
18595- cr4 = read_cr4();
18596- cr4 &= ~mask;
18597- write_cr4(cr4);
18598-}
18599+extern void set_in_cr4(unsigned long mask);
18600+extern void clear_in_cr4(unsigned long mask);
18601
18602 typedef struct {
18603 unsigned long seg;
18604@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18605 */
18606 #define TASK_SIZE PAGE_OFFSET
18607 #define TASK_SIZE_MAX TASK_SIZE
18608+
18609+#ifdef CONFIG_PAX_SEGMEXEC
18610+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18611+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18612+#else
18613 #define STACK_TOP TASK_SIZE
18614-#define STACK_TOP_MAX STACK_TOP
18615+#endif
18616+
18617+#define STACK_TOP_MAX TASK_SIZE
18618
18619 #define INIT_THREAD { \
18620- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18621+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18622 .vm86_info = NULL, \
18623 .sysenter_cs = __KERNEL_CS, \
18624 .io_bitmap_ptr = NULL, \
18625@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18626 */
18627 #define INIT_TSS { \
18628 .x86_tss = { \
18629- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18630+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18631 .ss0 = __KERNEL_DS, \
18632 .ss1 = __KERNEL_CS, \
18633 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18634@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18635 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18636
18637 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18638-#define KSTK_TOP(info) \
18639-({ \
18640- unsigned long *__ptr = (unsigned long *)(info); \
18641- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18642-})
18643+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18644
18645 /*
18646 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18647@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18648 #define task_pt_regs(task) \
18649 ({ \
18650 struct pt_regs *__regs__; \
18651- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18652+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18653 __regs__ - 1; \
18654 })
18655
18656@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18657 * particular problem by preventing anything from being mapped
18658 * at the maximum canonical address.
18659 */
18660-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18661+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18662
18663 /* This decides where the kernel will search for a free chunk of vm
18664 * space during mmap's.
18665 */
18666 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18667- 0xc0000000 : 0xFFFFe000)
18668+ 0xc0000000 : 0xFFFFf000)
18669
18670 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18671 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18672@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18673 #define STACK_TOP_MAX TASK_SIZE_MAX
18674
18675 #define INIT_THREAD { \
18676- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18677+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18678 }
18679
18680 #define INIT_TSS { \
18681- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18682+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18683 }
18684
18685 /*
18686@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18687 */
18688 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18689
18690+#ifdef CONFIG_PAX_SEGMEXEC
18691+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18692+#endif
18693+
18694 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18695
18696 /* Get/set a process' ability to use the timestamp counter instruction */
18697@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18698 return 0;
18699 }
18700
18701-extern unsigned long arch_align_stack(unsigned long sp);
18702+#define arch_align_stack(x) ((x) & ~0xfUL)
18703 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18704
18705 void default_idle(void);
18706@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18707 #define xen_set_default_idle 0
18708 #endif
18709
18710-void stop_this_cpu(void *dummy);
18711+void stop_this_cpu(void *dummy) __noreturn;
18712 void df_debug(struct pt_regs *regs, long error_code);
18713 #endif /* _ASM_X86_PROCESSOR_H */
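
Note on the PCID/INVPCID additions above: __ASM_INVPCID hand-encodes "invpcid (%rdx),%rax" as raw opcode bytes so the patch builds with assemblers that predate the mnemonic. The instruction takes the invalidation type in %rax and a 16-byte descriptor (PCID in the first quadword, linear address in the second) at the address in %rdx. A sketch of a wrapper matching the inline-asm usage that appears in the tlbflush.h hunks below (hypothetical helper name):

	/* sketch: issue INVPCID via the hand-encoded opcode; 'type' is one of
	 * the INVPCID_* constants above, 'pcid' is PCID_KERNEL or PCID_USER */
	static inline void invpcid_sketch(unsigned long type, unsigned long pcid,
					  unsigned long addr)
	{
		u64 descriptor[2] = { pcid, addr };

		asm volatile(__ASM_INVPCID
			     : : "d"(&descriptor), "a"(type) : "memory");
	}
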
18714diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18715index 86fc2bb..bd5049a 100644
18716--- a/arch/x86/include/asm/ptrace.h
18717+++ b/arch/x86/include/asm/ptrace.h
18718@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18719 }
18720
18721 /*
18722- * user_mode_vm(regs) determines whether a register set came from user mode.
18723+ * user_mode(regs) determines whether a register set came from user mode.
18724 * This is true if V8086 mode was enabled OR if the register set was from
18725 * protected mode with RPL-3 CS value. This tricky test checks that with
18726 * one comparison. Many places in the kernel can bypass this full check
18727- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18728+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18729+ * be used.
18730 */
18731-static inline int user_mode(struct pt_regs *regs)
18732+static inline int user_mode_novm(struct pt_regs *regs)
18733 {
18734 #ifdef CONFIG_X86_32
18735 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18736 #else
18737- return !!(regs->cs & 3);
18738+ return !!(regs->cs & SEGMENT_RPL_MASK);
18739 #endif
18740 }
18741
18742-static inline int user_mode_vm(struct pt_regs *regs)
18743+static inline int user_mode(struct pt_regs *regs)
18744 {
18745 #ifdef CONFIG_X86_32
18746 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18747 USER_RPL;
18748 #else
18749- return user_mode(regs);
18750+ return user_mode_novm(regs);
18751 #endif
18752 }
18753
18754@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18755 #ifdef CONFIG_X86_64
18756 static inline bool user_64bit_mode(struct pt_regs *regs)
18757 {
18758+ unsigned long cs = regs->cs & 0xffff;
18759 #ifndef CONFIG_PARAVIRT
18760 /*
18761 * On non-paravirt systems, this is the only long mode CPL 3
18762 * selector. We do not allow long mode selectors in the LDT.
18763 */
18764- return regs->cs == __USER_CS;
18765+ return cs == __USER_CS;
18766 #else
18767 /* Headers are too twisted for this to go in paravirt.h. */
18768- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18769+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18770 #endif
18771 }
18772
18773@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18774 * Traps from the kernel do not save sp and ss.
18775 * Use the helper function to retrieve sp.
18776 */
18777- if (offset == offsetof(struct pt_regs, sp) &&
18778- regs->cs == __KERNEL_CS)
18779- return kernel_stack_pointer(regs);
18780+ if (offset == offsetof(struct pt_regs, sp)) {
18781+ unsigned long cs = regs->cs & 0xffff;
18782+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18783+ return kernel_stack_pointer(regs);
18784+ }
18785 #endif
18786 return *(unsigned long *)((unsigned long)regs + offset);
18787 }
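
Note on the ptrace.h hunks above: the patch swaps the naming so that user_mode() is now the full, V8086-aware test (what mainline called user_mode_vm()) and user_mode_novm() is the cheap RPL-only check — the safe default becomes the thorough one. The helpers also mask %cs to its low 16 bits before comparing, presumably to tolerate repurposed or stale high bits in the saved slot under this patch, and regs_get_register() additionally recognizes __KERNEXEC_KERNEL_CS as a kernel code segment when deciding whether sp must be recovered from the trap frame.
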
18788diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18789index ae0e241..e80b10b 100644
18790--- a/arch/x86/include/asm/qrwlock.h
18791+++ b/arch/x86/include/asm/qrwlock.h
18792@@ -7,8 +7,8 @@
18793 #define queue_write_unlock queue_write_unlock
18794 static inline void queue_write_unlock(struct qrwlock *lock)
18795 {
18796- barrier();
18797- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18798+ barrier();
18799+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18800 }
18801 #endif
18802
18803diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18804index 9c6b890..5305f53 100644
18805--- a/arch/x86/include/asm/realmode.h
18806+++ b/arch/x86/include/asm/realmode.h
18807@@ -22,16 +22,14 @@ struct real_mode_header {
18808 #endif
18809 /* APM/BIOS reboot */
18810 u32 machine_real_restart_asm;
18811-#ifdef CONFIG_X86_64
18812 u32 machine_real_restart_seg;
18813-#endif
18814 };
18815
18816 /* This must match data at trampoline_32/64.S */
18817 struct trampoline_header {
18818 #ifdef CONFIG_X86_32
18819 u32 start;
18820- u16 gdt_pad;
18821+ u16 boot_cs;
18822 u16 gdt_limit;
18823 u32 gdt_base;
18824 #else
18825diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18826index a82c4f1..ac45053 100644
18827--- a/arch/x86/include/asm/reboot.h
18828+++ b/arch/x86/include/asm/reboot.h
18829@@ -6,13 +6,13 @@
18830 struct pt_regs;
18831
18832 struct machine_ops {
18833- void (*restart)(char *cmd);
18834- void (*halt)(void);
18835- void (*power_off)(void);
18836+ void (* __noreturn restart)(char *cmd);
18837+ void (* __noreturn halt)(void);
18838+ void (* __noreturn power_off)(void);
18839 void (*shutdown)(void);
18840 void (*crash_shutdown)(struct pt_regs *);
18841- void (*emergency_restart)(void);
18842-};
18843+ void (* __noreturn emergency_restart)(void);
18844+} __no_const;
18845
18846 extern struct machine_ops machine_ops;
18847
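
Note on the machine_ops hunk above: annotating the restart/halt/power_off/emergency_restart pointers __noreturn lets the compiler treat calls through them as terminal, and __no_const exempts the struct from the constification plugin — machine_ops is reassigned at runtime (e.g. by Xen or APM paths), so unlike most ops structures touched by this patch it cannot be made read-only.
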
18848diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18849index 8f7866a..e442f20 100644
18850--- a/arch/x86/include/asm/rmwcc.h
18851+++ b/arch/x86/include/asm/rmwcc.h
18852@@ -3,7 +3,34 @@
18853
18854 #ifdef CC_HAVE_ASM_GOTO
18855
18856-#define __GEN_RMWcc(fullop, var, cc, ...) \
18857+#ifdef CONFIG_PAX_REFCOUNT
18858+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18859+do { \
18860+ asm_volatile_goto (fullop \
18861+ ";jno 0f\n" \
18862+ fullantiop \
18863+ ";int $4\n0:\n" \
18864+ _ASM_EXTABLE(0b, 0b) \
18865+ ";j" cc " %l[cc_label]" \
18866+ : : "m" (var), ## __VA_ARGS__ \
18867+ : "memory" : cc_label); \
18868+ return 0; \
18869+cc_label: \
18870+ return 1; \
18871+} while (0)
18872+#else
18873+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18874+do { \
18875+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18876+ : : "m" (var), ## __VA_ARGS__ \
18877+ : "memory" : cc_label); \
18878+ return 0; \
18879+cc_label: \
18880+ return 1; \
18881+} while (0)
18882+#endif
18883+
18884+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18885 do { \
18886 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18887 : : "m" (var), ## __VA_ARGS__ \
18888@@ -13,15 +40,46 @@ cc_label: \
18889 return 1; \
18890 } while (0)
18891
18892-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18893- __GEN_RMWcc(op " " arg0, var, cc)
18894+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18895+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18896
18897-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18898- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18899+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18900+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18901+
18902+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18903+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18904+
18905+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18906+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18907
18908 #else /* !CC_HAVE_ASM_GOTO */
18909
18910-#define __GEN_RMWcc(fullop, var, cc, ...) \
18911+#ifdef CONFIG_PAX_REFCOUNT
18912+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18913+do { \
18914+ char c; \
18915+ asm volatile (fullop \
18916+ ";jno 0f\n" \
18917+ fullantiop \
18918+ ";int $4\n0:\n" \
18919+ _ASM_EXTABLE(0b, 0b) \
18920+ "; set" cc " %1" \
18921+ : "+m" (var), "=qm" (c) \
18922+ : __VA_ARGS__ : "memory"); \
18923+ return c != 0; \
18924+} while (0)
18925+#else
18926+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18927+do { \
18928+ char c; \
18929+ asm volatile (fullop "; set" cc " %1" \
18930+ : "+m" (var), "=qm" (c) \
18931+ : __VA_ARGS__ : "memory"); \
18932+ return c != 0; \
18933+} while (0)
18934+#endif
18935+
18936+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18937 do { \
18938 char c; \
18939 asm volatile (fullop "; set" cc " %1" \
18940@@ -30,11 +88,17 @@ do { \
18941 return c != 0; \
18942 } while (0)
18943
18944-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18945- __GEN_RMWcc(op " " arg0, var, cc)
18946+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18947+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18948+
18949+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18950+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18951+
18952+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18953+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18954
18955-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18956- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18957+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18958+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18959
18960 #endif /* CC_HAVE_ASM_GOTO */
18961
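
Note on the rmwcc.h rework above: each macro now comes in a checked flavor (overflow-trapping, used for reference counts under PAX_REFCOUNT) and an _unchecked flavor that keeps the stock behavior. In the checked case the generated code performs the operation and, on signed overflow, undoes it with the anti-op and raises #OF via "int $4", which the PaX overflow handler turns into a report; the _ASM_EXTABLE(0b, 0b) entry makes the trapping instruction resumable. For the preempt-count example earlier, GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e") expands to roughly:

	/* sketch of the generated sequence (asm-goto variant):
	 *
	 *	decl %gs:__preempt_count
	 *	jno  0f				# no signed overflow: carry on
	 *	incl %gs:__preempt_count	# anti-op: undo the wrap
	 *	int  $4				# raise #OF for the PaX handler
	 * 0:	je   cc_label			# original condition-code test
	 */
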
18962diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18963index cad82c9..2e5c5c1 100644
18964--- a/arch/x86/include/asm/rwsem.h
18965+++ b/arch/x86/include/asm/rwsem.h
18966@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18967 {
18968 asm volatile("# beginning down_read\n\t"
18969 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18970+
18971+#ifdef CONFIG_PAX_REFCOUNT
18972+ "jno 0f\n"
18973+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18974+ "int $4\n0:\n"
18975+ _ASM_EXTABLE(0b, 0b)
18976+#endif
18977+
18978 /* adds 0x00000001 */
18979 " jns 1f\n"
18980 " call call_rwsem_down_read_failed\n"
18981@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18982 "1:\n\t"
18983 " mov %1,%2\n\t"
18984 " add %3,%2\n\t"
18985+
18986+#ifdef CONFIG_PAX_REFCOUNT
18987+ "jno 0f\n"
18988+ "sub %3,%2\n"
18989+ "int $4\n0:\n"
18990+ _ASM_EXTABLE(0b, 0b)
18991+#endif
18992+
18993 " jle 2f\n\t"
18994 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18995 " jnz 1b\n\t"
18996@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18997 long tmp;
18998 asm volatile("# beginning down_write\n\t"
18999 LOCK_PREFIX " xadd %1,(%2)\n\t"
19000+
19001+#ifdef CONFIG_PAX_REFCOUNT
19002+ "jno 0f\n"
19003+ "mov %1,(%2)\n"
19004+ "int $4\n0:\n"
19005+ _ASM_EXTABLE(0b, 0b)
19006+#endif
19007+
19008 /* adds 0xffff0001, returns the old value */
19009 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19010 /* was the active mask 0 before? */
19011@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19012 long tmp;
19013 asm volatile("# beginning __up_read\n\t"
19014 LOCK_PREFIX " xadd %1,(%2)\n\t"
19015+
19016+#ifdef CONFIG_PAX_REFCOUNT
19017+ "jno 0f\n"
19018+ "mov %1,(%2)\n"
19019+ "int $4\n0:\n"
19020+ _ASM_EXTABLE(0b, 0b)
19021+#endif
19022+
19023 /* subtracts 1, returns the old value */
19024 " jns 1f\n\t"
19025 " call call_rwsem_wake\n" /* expects old value in %edx */
19026@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19027 long tmp;
19028 asm volatile("# beginning __up_write\n\t"
19029 LOCK_PREFIX " xadd %1,(%2)\n\t"
19030+
19031+#ifdef CONFIG_PAX_REFCOUNT
19032+ "jno 0f\n"
19033+ "mov %1,(%2)\n"
19034+ "int $4\n0:\n"
19035+ _ASM_EXTABLE(0b, 0b)
19036+#endif
19037+
19038 /* subtracts 0xffff0001, returns the old value */
19039 " jns 1f\n\t"
19040 " call call_rwsem_wake\n" /* expects old value in %edx */
19041@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19042 {
19043 asm volatile("# beginning __downgrade_write\n\t"
19044 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19045+
19046+#ifdef CONFIG_PAX_REFCOUNT
19047+ "jno 0f\n"
19048+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19049+ "int $4\n0:\n"
19050+ _ASM_EXTABLE(0b, 0b)
19051+#endif
19052+
19053 /*
19054 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19055 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19056@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19057 */
19058 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19059 {
19060- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19061+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19062+
19063+#ifdef CONFIG_PAX_REFCOUNT
19064+ "jno 0f\n"
19065+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19066+ "int $4\n0:\n"
19067+ _ASM_EXTABLE(0b, 0b)
19068+#endif
19069+
19070 : "+m" (sem->count)
19071 : "er" (delta));
19072 }
19073@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19074 */
19075 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19076 {
19077- return delta + xadd(&sem->count, delta);
19078+ return delta + xadd_check_overflow(&sem->count, delta);
19079 }
19080
19081 #endif /* __KERNEL__ */
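
Note on the rwsem.h hunks above: the same recover-and-trap pattern guards every fast-path count update — after the inc/xadd/add, "jno" skips the repair path, otherwise the old value is restored (or the delta subtracted) before "int $4" fires. The overflow test delegated to the CPU's OF flag is equivalent, at C level, to (illustrative only; kept in unsigned arithmetic to avoid signed-overflow UB):

	/* sketch: detect signed overflow of old + delta without hardware flags */
	static inline int add_overflows_sketch(long old, long delta)
	{
		long sum = (long)((unsigned long)old + (unsigned long)delta);

		/* overflow iff the operands share a sign and the sum does not */
		return (old ^ delta) >= 0 && (old ^ sum) < 0;
	}
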
19082diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19083index db257a5..b91bc77 100644
19084--- a/arch/x86/include/asm/segment.h
19085+++ b/arch/x86/include/asm/segment.h
19086@@ -73,10 +73,15 @@
19087 * 26 - ESPFIX small SS
19088 * 27 - per-cpu [ offset to per-cpu data area ]
19089 * 28 - stack_canary-20 [ for stack protector ]
19090- * 29 - unused
19091- * 30 - unused
19092+ * 29 - PCI BIOS CS
19093+ * 30 - PCI BIOS DS
19094 * 31 - TSS for double fault handler
19095 */
19096+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19097+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19098+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19099+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19100+
19101 #define GDT_ENTRY_TLS_MIN 6
19102 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19103
19104@@ -88,6 +93,8 @@
19105
19106 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19107
19108+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19109+
19110 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19111
19112 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19113@@ -113,6 +120,12 @@
19114 #define __KERNEL_STACK_CANARY 0
19115 #endif
19116
19117+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19118+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19119+
19120+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19121+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19122+
19123 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19124
19125 /*
19126@@ -140,7 +153,7 @@
19127 */
19128
19129 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19130-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19131+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19132
19133
19134 #else
19135@@ -164,6 +177,8 @@
19136 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19137 #define __USER32_DS __USER_DS
19138
19139+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19140+
19141 #define GDT_ENTRY_TSS 8 /* needs two entries */
19142 #define GDT_ENTRY_LDT 10 /* needs two entries */
19143 #define GDT_ENTRY_TLS_MIN 12
19144@@ -172,6 +187,8 @@
19145 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19146 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19147
19148+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19149+
19150 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19151 #define FS_TLS 0
19152 #define GS_TLS 1
19153@@ -179,12 +196,14 @@
19154 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19155 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19156
19157-#define GDT_ENTRIES 16
19158+#define GDT_ENTRIES 17
19159
19160 #endif
19161
19162 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19163+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19164 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19165+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19166 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19167 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19168 #ifndef CONFIG_PARAVIRT
19169@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19170 {
19171 unsigned long __limit;
19172 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19173- return __limit + 1;
19174+ return __limit;
19175 }
19176
19177 #endif /* !__ASSEMBLY__ */
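
Note on the segment.h hunks above: the new descriptors follow the usual selector = index * 8 rule, e.g. on 64-bit __KERNEXEC_KERNEL_CS = 7 * 8 = 0x38 and __UDEREF_KERNEL_DS = 16 * 8 = 0x80, which is why GDT_ENTRIES grows from 16 to 17. Note also that get_limit() now returns the raw "lsl" result — the limit proper, i.e. the offset of the last valid byte — instead of limit + 1; callers elsewhere in the patch are presumably adjusted to match.
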
19178diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19179index 8d3120f..352b440 100644
19180--- a/arch/x86/include/asm/smap.h
19181+++ b/arch/x86/include/asm/smap.h
19182@@ -25,11 +25,40 @@
19183
19184 #include <asm/alternative-asm.h>
19185
19186+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19187+#define ASM_PAX_OPEN_USERLAND \
19188+ 661: jmp 663f; \
19189+ .pushsection .altinstr_replacement, "a" ; \
19190+ 662: pushq %rax; nop; \
19191+ .popsection ; \
19192+ .pushsection .altinstructions, "a" ; \
19193+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19194+ .popsection ; \
19195+ call __pax_open_userland; \
19196+ popq %rax; \
19197+ 663:
19198+
19199+#define ASM_PAX_CLOSE_USERLAND \
19200+ 661: jmp 663f; \
19201+ .pushsection .altinstr_replacement, "a" ; \
19202+ 662: pushq %rax; nop; \
19203+ .popsection; \
19204+ .pushsection .altinstructions, "a" ; \
19205+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19206+ .popsection; \
19207+ call __pax_close_userland; \
19208+ popq %rax; \
19209+ 663:
19210+#else
19211+#define ASM_PAX_OPEN_USERLAND
19212+#define ASM_PAX_CLOSE_USERLAND
19213+#endif
19214+
19215 #ifdef CONFIG_X86_SMAP
19216
19217 #define ASM_CLAC \
19218 661: ASM_NOP3 ; \
19219- .pushsection .altinstr_replacement, "ax" ; \
19220+ .pushsection .altinstr_replacement, "a" ; \
19221 662: __ASM_CLAC ; \
19222 .popsection ; \
19223 .pushsection .altinstructions, "a" ; \
19224@@ -38,7 +67,7 @@
19225
19226 #define ASM_STAC \
19227 661: ASM_NOP3 ; \
19228- .pushsection .altinstr_replacement, "ax" ; \
19229+ .pushsection .altinstr_replacement, "a" ; \
19230 662: __ASM_STAC ; \
19231 .popsection ; \
19232 .pushsection .altinstructions, "a" ; \
19233@@ -56,6 +85,37 @@
19234
19235 #include <asm/alternative.h>
19236
19237+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19238+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19239+
19240+extern void __pax_open_userland(void);
19241+static __always_inline unsigned long pax_open_userland(void)
19242+{
19243+
19244+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19245+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19246+ :
19247+ : [open] "i" (__pax_open_userland)
19248+ : "memory", "rax");
19249+#endif
19250+
19251+ return 0;
19252+}
19253+
19254+extern void __pax_close_userland(void);
19255+static __always_inline unsigned long pax_close_userland(void)
19256+{
19257+
19258+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19259+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19260+ :
19261+ : [close] "i" (__pax_close_userland)
19262+ : "memory", "rax");
19263+#endif
19264+
19265+ return 0;
19266+}
19267+
19268 #ifdef CONFIG_X86_SMAP
19269
19270 static __always_inline void clac(void)
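
Note on the smap.h additions above: pax_open_userland()/pax_close_userland() compile to a 5-byte NOP that the boot-time alternatives code patches into a call only when the CPU advertises X86_FEATURE_STRONGUDEREF, so kernels on other hardware pay nothing; the assembler variants (ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND) do the same via altinstruction_entry, with a patched-in pushq preserving %rax around the call. The .altinstr_replacement section flag also changes from "ax" to "a" — the apparent rationale being that the replacement bytes are only ever copied out of the section, never executed in place, so it need not be mapped executable.
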
19271diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19272index 8cd1cc3..827e09e 100644
19273--- a/arch/x86/include/asm/smp.h
19274+++ b/arch/x86/include/asm/smp.h
19275@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19276 /* cpus sharing the last level cache: */
19277 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19278 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19279-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19280+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19281
19282 static inline struct cpumask *cpu_sibling_mask(int cpu)
19283 {
19284@@ -78,7 +78,7 @@ struct smp_ops {
19285
19286 void (*send_call_func_ipi)(const struct cpumask *mask);
19287 void (*send_call_func_single_ipi)(int cpu);
19288-};
19289+} __no_const;
19290
19291 /* Globals due to paravirt */
19292 extern void set_cpu_sibling_map(int cpu);
19293@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19294 extern int safe_smp_processor_id(void);
19295
19296 #elif defined(CONFIG_X86_64_SMP)
19297-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19298-
19299-#define stack_smp_processor_id() \
19300-({ \
19301- struct thread_info *ti; \
19302- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19303- ti->cpu; \
19304-})
19305+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19306+#define stack_smp_processor_id() raw_smp_processor_id()
19307 #define safe_smp_processor_id() smp_processor_id()
19308
19309 #endif
19310diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19311index 6a99859..03cb807 100644
19312--- a/arch/x86/include/asm/stackprotector.h
19313+++ b/arch/x86/include/asm/stackprotector.h
19314@@ -47,7 +47,7 @@
19315 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19316 */
19317 #define GDT_STACK_CANARY_INIT \
19318- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19319+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19320
19321 /*
19322 * Initialize the stackprotector canary value.
19323@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19324
19325 static inline void load_stack_canary_segment(void)
19326 {
19327-#ifdef CONFIG_X86_32
19328+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19329 asm volatile ("mov %0, %%gs" : : "r" (0));
19330 #endif
19331 }
19332diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19333index 70bbe39..4ae2bd4 100644
19334--- a/arch/x86/include/asm/stacktrace.h
19335+++ b/arch/x86/include/asm/stacktrace.h
19336@@ -11,28 +11,20 @@
19337
19338 extern int kstack_depth_to_print;
19339
19340-struct thread_info;
19341+struct task_struct;
19342 struct stacktrace_ops;
19343
19344-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19345- unsigned long *stack,
19346- unsigned long bp,
19347- const struct stacktrace_ops *ops,
19348- void *data,
19349- unsigned long *end,
19350- int *graph);
19351+typedef unsigned long walk_stack_t(struct task_struct *task,
19352+ void *stack_start,
19353+ unsigned long *stack,
19354+ unsigned long bp,
19355+ const struct stacktrace_ops *ops,
19356+ void *data,
19357+ unsigned long *end,
19358+ int *graph);
19359
19360-extern unsigned long
19361-print_context_stack(struct thread_info *tinfo,
19362- unsigned long *stack, unsigned long bp,
19363- const struct stacktrace_ops *ops, void *data,
19364- unsigned long *end, int *graph);
19365-
19366-extern unsigned long
19367-print_context_stack_bp(struct thread_info *tinfo,
19368- unsigned long *stack, unsigned long bp,
19369- const struct stacktrace_ops *ops, void *data,
19370- unsigned long *end, int *graph);
19371+extern walk_stack_t print_context_stack;
19372+extern walk_stack_t print_context_stack_bp;
19373
19374 /* Generic stack tracer with callbacks */
19375
19376@@ -40,7 +32,7 @@ struct stacktrace_ops {
19377 void (*address)(void *data, unsigned long address, int reliable);
19378 /* On negative return stop dumping */
19379 int (*stack)(void *data, char *name);
19380- walk_stack_t walk_stack;
19381+ walk_stack_t *walk_stack;
19382 };
19383
19384 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19385diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19386index 751bf4b..a1278b5 100644
19387--- a/arch/x86/include/asm/switch_to.h
19388+++ b/arch/x86/include/asm/switch_to.h
19389@@ -112,7 +112,7 @@ do { \
19390 "call __switch_to\n\t" \
19391 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19392 __switch_canary \
19393- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19394+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19395 "movq %%rax,%%rdi\n\t" \
19396 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19397 "jnz ret_from_fork\n\t" \
19398@@ -123,7 +123,7 @@ do { \
19399 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19400 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19401 [_tif_fork] "i" (_TIF_FORK), \
19402- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19403+ [thread_info] "m" (current_tinfo), \
19404 [current_task] "m" (current_task) \
19405 __switch_canary_iparam \
19406 : "memory", "cc" __EXTRA_CLOBBER)
19407diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19408index 547e344..6be1175 100644
19409--- a/arch/x86/include/asm/thread_info.h
19410+++ b/arch/x86/include/asm/thread_info.h
19411@@ -24,7 +24,6 @@ struct exec_domain;
19412 #include <linux/atomic.h>
19413
19414 struct thread_info {
19415- struct task_struct *task; /* main task structure */
19416 struct exec_domain *exec_domain; /* execution domain */
19417 __u32 flags; /* low level flags */
19418 __u32 status; /* thread synchronous flags */
19419@@ -33,13 +32,13 @@ struct thread_info {
19420 mm_segment_t addr_limit;
19421 struct restart_block restart_block;
19422 void __user *sysenter_return;
19423+ unsigned long lowest_stack;
19424 unsigned int sig_on_uaccess_error:1;
19425 unsigned int uaccess_err:1; /* uaccess failed */
19426 };
19427
19428-#define INIT_THREAD_INFO(tsk) \
19429+#define INIT_THREAD_INFO \
19430 { \
19431- .task = &tsk, \
19432 .exec_domain = &default_exec_domain, \
19433 .flags = 0, \
19434 .cpu = 0, \
19435@@ -50,7 +49,7 @@ struct thread_info {
19436 }, \
19437 }
19438
19439-#define init_thread_info (init_thread_union.thread_info)
19440+#define init_thread_info (init_thread_union.stack)
19441 #define init_stack (init_thread_union.stack)
19442
19443 #else /* !__ASSEMBLY__ */
19444@@ -91,6 +90,7 @@ struct thread_info {
19445 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19446 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19447 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19448+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19449
19450 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19451 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19452@@ -115,17 +115,18 @@ struct thread_info {
19453 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19454 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19455 #define _TIF_X32 (1 << TIF_X32)
19456+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19457
19458 /* work to do in syscall_trace_enter() */
19459 #define _TIF_WORK_SYSCALL_ENTRY \
19460 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19461 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19462- _TIF_NOHZ)
19463+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19464
19465 /* work to do in syscall_trace_leave() */
19466 #define _TIF_WORK_SYSCALL_EXIT \
19467 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19468- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19469+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19470
19471 /* work to do on interrupt/exception return */
19472 #define _TIF_WORK_MASK \
19473@@ -136,7 +137,7 @@ struct thread_info {
19474 /* work to do on any return to user space */
19475 #define _TIF_ALLWORK_MASK \
19476 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19477- _TIF_NOHZ)
19478+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19479
19480 /* Only used for 64 bit */
19481 #define _TIF_DO_NOTIFY_MASK \
19482@@ -151,7 +152,6 @@ struct thread_info {
19483 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19484
19485 #define STACK_WARN (THREAD_SIZE/8)
19486-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19487
19488 /*
19489 * macros/functions for gaining access to the thread information structure
19490@@ -162,26 +162,18 @@ struct thread_info {
19491
19492 DECLARE_PER_CPU(unsigned long, kernel_stack);
19493
19494+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19495+
19496 static inline struct thread_info *current_thread_info(void)
19497 {
19498- struct thread_info *ti;
19499- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19500- KERNEL_STACK_OFFSET - THREAD_SIZE);
19501- return ti;
19502+ return this_cpu_read_stable(current_tinfo);
19503 }
19504
19505 #else /* !__ASSEMBLY__ */
19506
19507 /* how to get the thread information struct from ASM */
19508 #define GET_THREAD_INFO(reg) \
19509- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19510- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19511-
19512-/*
19513- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19514- * a certain register (to be used in assembler memory operands).
19515- */
19516-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19517+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19518
19519 #endif
19520
19521@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19522 extern void arch_task_cache_init(void);
19523 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19524 extern void arch_release_task_struct(struct task_struct *tsk);
19525+
19526+#define __HAVE_THREAD_FUNCTIONS
19527+#define task_thread_info(task) (&(task)->tinfo)
19528+#define task_stack_page(task) ((task)->stack)
19529+#define setup_thread_stack(p, org) do {} while (0)
19530+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19531+
19532 #endif
19533 #endif /* _ASM_X86_THREAD_INFO_H */
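
Note on the thread_info.h rework above: thread_info loses its task back-pointer and is no longer located by masking the stack pointer; instead each CPU maintains a current_tinfo pointer, and __HAVE_THREAD_FUNCTIONS moves the structure into task_struct itself ((task)->tinfo) — presumably so a kernel-stack overflow can no longer clobber it. The lookup change in miniature:

	struct thread_info *ti;

	/* before (removed above): derive thread_info from the stack layout */
	ti = (void *)(this_cpu_read_stable(kernel_stack)
		      + KERNEL_STACK_OFFSET - THREAD_SIZE);

	/* after: read the per-cpu pointer maintained at context switch */
	ti = this_cpu_read_stable(current_tinfo);
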
19534diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19535index 04905bf..1178cdf 100644
19536--- a/arch/x86/include/asm/tlbflush.h
19537+++ b/arch/x86/include/asm/tlbflush.h
19538@@ -17,18 +17,44 @@
19539
19540 static inline void __native_flush_tlb(void)
19541 {
19542+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19543+ u64 descriptor[2];
19544+
19545+ descriptor[0] = PCID_KERNEL;
19546+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19547+ return;
19548+ }
19549+
19550+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19551+ if (static_cpu_has(X86_FEATURE_PCID)) {
19552+ unsigned int cpu = raw_get_cpu();
19553+
19554+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19555+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19556+ raw_put_cpu_no_resched();
19557+ return;
19558+ }
19559+#endif
19560+
19561 native_write_cr3(native_read_cr3());
19562 }
19563
19564 static inline void __native_flush_tlb_global_irq_disabled(void)
19565 {
19566- unsigned long cr4;
19567+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19568+ u64 descriptor[2];
19569
19570- cr4 = native_read_cr4();
19571- /* clear PGE */
19572- native_write_cr4(cr4 & ~X86_CR4_PGE);
19573- /* write old PGE again and flush TLBs */
19574- native_write_cr4(cr4);
19575+ descriptor[0] = PCID_KERNEL;
19576+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19577+ } else {
19578+ unsigned long cr4;
19579+
19580+ cr4 = native_read_cr4();
19581+ /* clear PGE */
19582+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19583+ /* write old PGE again and flush TLBs */
19584+ native_write_cr4(cr4);
19585+ }
19586 }
19587
19588 static inline void __native_flush_tlb_global(void)
19589@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19590
19591 static inline void __native_flush_tlb_single(unsigned long addr)
19592 {
19593+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19594+ u64 descriptor[2];
19595+
19596+ descriptor[0] = PCID_KERNEL;
19597+ descriptor[1] = addr;
19598+
19599+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19600+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19601+ if (addr < TASK_SIZE_MAX)
19602+ descriptor[1] += pax_user_shadow_base;
19603+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19604+ }
19605+
19606+ descriptor[0] = PCID_USER;
19607+ descriptor[1] = addr;
19608+#endif
19609+
19610+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19611+ return;
19612+ }
19613+
19614+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19615+ if (static_cpu_has(X86_FEATURE_PCID)) {
19616+ unsigned int cpu = raw_get_cpu();
19617+
19618+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19619+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19620+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19621+ raw_put_cpu_no_resched();
19622+
19623+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19624+ addr += pax_user_shadow_base;
19625+ }
19626+#endif
19627+
19628 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19629 }
19630
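
Note on the tlbflush.h rework above: each flush primitive now tries, in order, (1) INVPCID for a targeted invalidation, (2) on UDEREF-with-PCID systems a pair of CR3 reloads (user PCID then kernel PCID, without PCID_NOFLUSH, so each reload flushes its own context), and (3) the classic CR3 rewrite or CR4.PGE toggle. Single-address flushes must also cover the UDEREF shadow alias; using the invpcid_sketch() helper sketched in the processor.h note, that part is roughly:

	/* sketch: a userland address is reachable at addr (user PCID) and,
	 * without STRONGUDEREF, at addr + pax_user_shadow_base (kernel PCID),
	 * so both aliases are invalidated */
	static inline void flush_user_addr_sketch(unsigned long addr)
	{
		invpcid_sketch(INVPCID_SINGLE_ADDRESS, PCID_USER, addr);
		invpcid_sketch(INVPCID_SINGLE_ADDRESS, PCID_KERNEL,
			       addr + pax_user_shadow_base);
	}
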
19631diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19632index 0d592e0..7430aad 100644
19633--- a/arch/x86/include/asm/uaccess.h
19634+++ b/arch/x86/include/asm/uaccess.h
19635@@ -7,6 +7,7 @@
19636 #include <linux/compiler.h>
19637 #include <linux/thread_info.h>
19638 #include <linux/string.h>
19639+#include <linux/spinlock.h>
19640 #include <asm/asm.h>
19641 #include <asm/page.h>
19642 #include <asm/smap.h>
19643@@ -29,7 +30,12 @@
19644
19645 #define get_ds() (KERNEL_DS)
19646 #define get_fs() (current_thread_info()->addr_limit)
19647+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19648+void __set_fs(mm_segment_t x);
19649+void set_fs(mm_segment_t x);
19650+#else
19651 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19652+#endif
19653
19654 #define segment_eq(a, b) ((a).seg == (b).seg)
19655
19656@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19657 * checks that the pointer is in the user space range - after calling
19658 * this function, memory access functions may still return -EFAULT.
19659 */
19660-#define access_ok(type, addr, size) \
19661- likely(!__range_not_ok(addr, size, user_addr_max()))
19662+extern int _cond_resched(void);
19663+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19664+#define access_ok(type, addr, size) \
19665+({ \
19666+ unsigned long __size = size; \
19667+ unsigned long __addr = (unsigned long)addr; \
19668+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19669+ if (__ret_ao && __size) { \
19670+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19671+ unsigned long __end_ao = __addr + __size - 1; \
19672+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19673+ while (__addr_ao <= __end_ao) { \
19674+ char __c_ao; \
19675+ __addr_ao += PAGE_SIZE; \
19676+ if (__size > PAGE_SIZE) \
19677+ _cond_resched(); \
19678+ if (__get_user(__c_ao, (char __user *)__addr)) \
19679+ break; \
19680+ if (type != VERIFY_WRITE) { \
19681+ __addr = __addr_ao; \
19682+ continue; \
19683+ } \
19684+ if (__put_user(__c_ao, (char __user *)__addr)) \
19685+ break; \
19686+ __addr = __addr_ao; \
19687+ } \
19688+ } \
19689+ } \
19690+ __ret_ao; \
19691+})
19692
19693 /*
19694 * The exception table consists of pairs of addresses relative to the
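
Note on the access_ok() rework above: the pure range check is kept (and its verdict is still what gets returned), but multi-page ranges are additionally prefaulted — one byte read per page, written back for VERIFY_WRITE — with a cond_resched() for large spans; access_ok_noprefault() preserves the old behavior. Restated at C level (sketch; the real thing is the statement-expression macro above):

	/* sketch: range-check, then touch each page of a multi-page range */
	static bool access_ok_sketch(int type, unsigned long addr, unsigned long size)
	{
		bool ok = !__range_not_ok(addr, size, user_addr_max());
		unsigned long page = addr & PAGE_MASK;
		unsigned long end = addr + size - 1;
		char c;

		if (!ok || !size || !((page ^ end) & PAGE_MASK))
			return ok;	/* bad range, empty, or single page */

		while (page <= end) {
			page += PAGE_SIZE;
			if (size > PAGE_SIZE)
				_cond_resched();
			if (__get_user(c, (char __user *)addr))
				break;	/* unmapped: later accessors will -EFAULT */
			if (type == VERIFY_WRITE &&
			    __put_user(c, (char __user *)addr))
				break;
			addr = page;
		}
		return ok;	/* prefault failures do not change the verdict */
	}
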
19695@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19696 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19697 __chk_user_ptr(ptr); \
19698 might_fault(); \
19699+ pax_open_userland(); \
19700 asm volatile("call __get_user_%P3" \
19701 : "=a" (__ret_gu), "=r" (__val_gu) \
19702 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19703 (x) = (__typeof__(*(ptr))) __val_gu; \
19704+ pax_close_userland(); \
19705 __ret_gu; \
19706 })
19707
19708@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19709 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19710 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19711
19712-
19713+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19714+#define __copyuser_seg "gs;"
19715+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19716+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19717+#else
19718+#define __copyuser_seg
19719+#define __COPYUSER_SET_ES
19720+#define __COPYUSER_RESTORE_ES
19721+#endif
19722
19723 #ifdef CONFIG_X86_32
19724 #define __put_user_asm_u64(x, addr, err, errret) \
19725 asm volatile(ASM_STAC "\n" \
19726- "1: movl %%eax,0(%2)\n" \
19727- "2: movl %%edx,4(%2)\n" \
19728+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19729+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19730 "3: " ASM_CLAC "\n" \
19731 ".section .fixup,\"ax\"\n" \
19732 "4: movl %3,%0\n" \
19733@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19734
19735 #define __put_user_asm_ex_u64(x, addr) \
19736 asm volatile(ASM_STAC "\n" \
19737- "1: movl %%eax,0(%1)\n" \
19738- "2: movl %%edx,4(%1)\n" \
19739+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19740+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19741 "3: " ASM_CLAC "\n" \
19742 _ASM_EXTABLE_EX(1b, 2b) \
19743 _ASM_EXTABLE_EX(2b, 3b) \
19744@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19745 __typeof__(*(ptr)) __pu_val; \
19746 __chk_user_ptr(ptr); \
19747 might_fault(); \
19748- __pu_val = x; \
19749+ __pu_val = (x); \
19750+ pax_open_userland(); \
19751 switch (sizeof(*(ptr))) { \
19752 case 1: \
19753 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19754@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19755 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19756 break; \
19757 } \
19758+ pax_close_userland(); \
19759 __ret_pu; \
19760 })
19761
19762@@ -355,8 +401,10 @@ do { \
19763 } while (0)
19764
19765 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19766+do { \
19767+ pax_open_userland(); \
19768 asm volatile(ASM_STAC "\n" \
19769- "1: mov"itype" %2,%"rtype"1\n" \
19770+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19771 "2: " ASM_CLAC "\n" \
19772 ".section .fixup,\"ax\"\n" \
19773 "3: mov %3,%0\n" \
19774@@ -364,8 +412,10 @@ do { \
19775 " jmp 2b\n" \
19776 ".previous\n" \
19777 _ASM_EXTABLE(1b, 3b) \
19778- : "=r" (err), ltype(x) \
19779- : "m" (__m(addr)), "i" (errret), "0" (err))
19780+ : "=r" (err), ltype (x) \
19781+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19782+ pax_close_userland(); \
19783+} while (0)
19784
19785 #define __get_user_size_ex(x, ptr, size) \
19786 do { \
19787@@ -389,7 +439,7 @@ do { \
19788 } while (0)
19789
19790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19791- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19792+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19793 "2:\n" \
19794 _ASM_EXTABLE_EX(1b, 2b) \
19795 : ltype(x) : "m" (__m(addr)))
19796@@ -406,13 +456,24 @@ do { \
19797 int __gu_err; \
19798 unsigned long __gu_val; \
19799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19800- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19801+ (x) = (__typeof__(*(ptr)))__gu_val; \
19802 __gu_err; \
19803 })
19804
19805 /* FIXME: this hack is definitely wrong -AK */
19806 struct __large_struct { unsigned long buf[100]; };
19807-#define __m(x) (*(struct __large_struct __user *)(x))
19808+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19809+#define ____m(x) \
19810+({ \
19811+ unsigned long ____x = (unsigned long)(x); \
19812+ if (____x < pax_user_shadow_base) \
19813+ ____x += pax_user_shadow_base; \
19814+ (typeof(x))____x; \
19815+})
19816+#else
19817+#define ____m(x) (x)
19818+#endif
19819+#define __m(x) (*(struct __large_struct __user *)____m(x))
19820
19821 /*
19822 * Tell gcc we read from memory instead of writing: this is because
19823@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19824 * aliasing issues.
19825 */
19826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19827+do { \
19828+ pax_open_userland(); \
19829 asm volatile(ASM_STAC "\n" \
19830- "1: mov"itype" %"rtype"1,%2\n" \
19831+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19832 "2: " ASM_CLAC "\n" \
19833 ".section .fixup,\"ax\"\n" \
19834 "3: mov %3,%0\n" \
19835@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19836 ".previous\n" \
19837 _ASM_EXTABLE(1b, 3b) \
19838 : "=r"(err) \
19839- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19840+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19841+ pax_close_userland(); \
19842+} while (0)
19843
19844 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19845- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19846+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19847 "2:\n" \
19848 _ASM_EXTABLE_EX(1b, 2b) \
19849 : : ltype(x), "m" (__m(addr)))
19850@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19851 */
19852 #define uaccess_try do { \
19853 current_thread_info()->uaccess_err = 0; \
19854+ pax_open_userland(); \
19855 stac(); \
19856 barrier();
19857
19858 #define uaccess_catch(err) \
19859 clac(); \
19860+ pax_close_userland(); \
19861 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19862 } while (0)
19863
19864@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19865 * On error, the variable @x is set to zero.
19866 */
19867
19868+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19869+#define __get_user(x, ptr) get_user((x), (ptr))
19870+#else
19871 #define __get_user(x, ptr) \
19872 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19873+#endif
19874
19875 /**
19876 * __put_user: - Write a simple value into user space, with less checking.
19877@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19878 * Returns zero on success, or -EFAULT on error.
19879 */
19880
19881+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19882+#define __put_user(x, ptr) put_user((x), (ptr))
19883+#else
19884 #define __put_user(x, ptr) \
19885 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19886+#endif
19887
19888 #define __get_user_unaligned __get_user
19889 #define __put_user_unaligned __put_user
19890@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19891 #define get_user_ex(x, ptr) do { \
19892 unsigned long __gue_val; \
19893 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19894- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19895+ (x) = (__typeof__(*(ptr)))__gue_val; \
19896 } while (0)
19897
19898 #define put_user_try uaccess_try
19899@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
19900 extern __must_check long strnlen_user(const char __user *str, long n);
19901
19902 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19903-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19904+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19905
19906 extern void __cmpxchg_wrong_size(void)
19907 __compiletime_error("Bad argument size for cmpxchg");
19908@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19909 __typeof__(ptr) __uval = (uval); \
19910 __typeof__(*(ptr)) __old = (old); \
19911 __typeof__(*(ptr)) __new = (new); \
19912+ pax_open_userland(); \
19913 switch (size) { \
19914 case 1: \
19915 { \
19916 asm volatile("\t" ASM_STAC "\n" \
19917- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19918+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19919 "2:\t" ASM_CLAC "\n" \
19920 "\t.section .fixup, \"ax\"\n" \
19921 "3:\tmov %3, %0\n" \
19922 "\tjmp 2b\n" \
19923 "\t.previous\n" \
19924 _ASM_EXTABLE(1b, 3b) \
19925- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19926+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19927 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19928 : "memory" \
19929 ); \
19930@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
19931 case 2: \
19932 { \
19933 asm volatile("\t" ASM_STAC "\n" \
19934- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19935+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19936 "2:\t" ASM_CLAC "\n" \
19937 "\t.section .fixup, \"ax\"\n" \
19938 "3:\tmov %3, %0\n" \
19939 "\tjmp 2b\n" \
19940 "\t.previous\n" \
19941 _ASM_EXTABLE(1b, 3b) \
19942- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19943+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19944 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19945 : "memory" \
19946 ); \
19947@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
19948 case 4: \
19949 { \
19950 asm volatile("\t" ASM_STAC "\n" \
19951- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19952+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19953 "2:\t" ASM_CLAC "\n" \
19954 "\t.section .fixup, \"ax\"\n" \
19955 "3:\tmov %3, %0\n" \
19956 "\tjmp 2b\n" \
19957 "\t.previous\n" \
19958 _ASM_EXTABLE(1b, 3b) \
19959- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19960+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19961 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19962 : "memory" \
19963 ); \
19964@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
19965 __cmpxchg_wrong_size(); \
19966 \
19967 asm volatile("\t" ASM_STAC "\n" \
19968- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19969+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19970 "2:\t" ASM_CLAC "\n" \
19971 "\t.section .fixup, \"ax\"\n" \
19972 "3:\tmov %3, %0\n" \
19973 "\tjmp 2b\n" \
19974 "\t.previous\n" \
19975 _ASM_EXTABLE(1b, 3b) \
19976- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19977+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19978 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19979 : "memory" \
19980 ); \
19981@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
19982 default: \
19983 __cmpxchg_wrong_size(); \
19984 } \
19985+ pax_close_userland(); \
19986 *__uval = __old; \
19987 __ret; \
19988 })
19989@@ -636,17 +713,6 @@ extern struct movsl_mask {
19990
19991 #define ARCH_HAS_NOCACHE_UACCESS 1
19992
19993-#ifdef CONFIG_X86_32
19994-# include <asm/uaccess_32.h>
19995-#else
19996-# include <asm/uaccess_64.h>
19997-#endif
19998-
19999-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20000- unsigned n);
20001-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20002- unsigned n);
20003-
20004 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20005 # define copy_user_diag __compiletime_error
20006 #else
20007@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20008 extern void copy_user_diag("copy_from_user() buffer size is too small")
20009 copy_from_user_overflow(void);
20010 extern void copy_user_diag("copy_to_user() buffer size is too small")
20011-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20012+copy_to_user_overflow(void);
20013
20014 #undef copy_user_diag
20015
20016@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20017
20018 extern void
20019 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20020-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20021+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20022 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20023
20024 #else
20025@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20026
20027 #endif
20028
20029+#ifdef CONFIG_X86_32
20030+# include <asm/uaccess_32.h>
20031+#else
20032+# include <asm/uaccess_64.h>
20033+#endif
20034+
20035 static inline unsigned long __must_check
20036 copy_from_user(void *to, const void __user *from, unsigned long n)
20037 {
20038- int sz = __compiletime_object_size(to);
20039+ size_t sz = __compiletime_object_size(to);
20040
20041 might_fault();
20042
20043@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20044 * case, and do only runtime checking for non-constant sizes.
20045 */
20046
20047- if (likely(sz < 0 || sz >= n))
20048- n = _copy_from_user(to, from, n);
20049- else if(__builtin_constant_p(n))
20050- copy_from_user_overflow();
20051- else
20052- __copy_from_user_overflow(sz, n);
20053+ if (likely(sz != (size_t)-1 && sz < n)) {
20054+ if(__builtin_constant_p(n))
20055+ copy_from_user_overflow();
20056+ else
20057+ __copy_from_user_overflow(sz, n);
20058+ } else if (access_ok(VERIFY_READ, from, n))
20059+ n = __copy_from_user(to, from, n);
20060+ else if ((long)n > 0)
20061+ memset(to, 0, n);
20062
20063 return n;
20064 }
20065@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20066 static inline unsigned long __must_check
20067 copy_to_user(void __user *to, const void *from, unsigned long n)
20068 {
20069- int sz = __compiletime_object_size(from);
20070+ size_t sz = __compiletime_object_size(from);
20071
20072 might_fault();
20073
20074 /* See the comment in copy_from_user() above. */
20075- if (likely(sz < 0 || sz >= n))
20076- n = _copy_to_user(to, from, n);
20077- else if(__builtin_constant_p(n))
20078- copy_to_user_overflow();
20079- else
20080- __copy_to_user_overflow(sz, n);
20081+ if (likely(sz != (size_t)-1 && sz < n)) {
20082+ if(__builtin_constant_p(n))
20083+ copy_to_user_overflow();
20084+ else
20085+ __copy_to_user_overflow(sz, n);
20086+ } else if (access_ok(VERIFY_WRITE, to, n))
20087+ n = __copy_to_user(to, from, n);
20088
20089 return n;
20090 }
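
The copy_from_user()/copy_to_user() rewrite above inverts the original decision order: a provably-too-small destination now refuses the copy outright (after emitting the overflow diagnostic), the actual copy is gated on access_ok(), and a failed access check zeroes the destination so stale kernel memory cannot leak back to the caller. A minimal standalone sketch of that ordering, with access_ok() and the low-level copy stubbed out:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define SIZE_UNKNOWN ((size_t)-1)   /* __compiletime_object_size() miss */

    static int access_ok_stub(const void *from, size_t n)
    {
        (void)n;
        return from != NULL;            /* stands in for access_ok() */
    }

    static size_t hardened_copy_from_user(void *to, const void *from,
                                          size_t sz, size_t n)
    {
        if (sz != SIZE_UNKNOWN && sz < n)   /* provable overflow: refuse */
            return n;                       /* kernel build would also warn */
        if (access_ok_stub(from, n)) {
            memcpy(to, from, n);            /* pretend the copy succeeds */
            return 0;                       /* 0 bytes left uncopied */
        }
        if ((long)n > 0)                    /* bad pointer: zero the buffer */
            memset(to, 0, n);               /* so stale kernel data cannot leak */
        return n;
    }

    int main(void)
    {
        char dst[8], src[8] = "secret";
        printf("%zu\n", hardened_copy_from_user(dst, src, sizeof(dst), 4));
        printf("%zu\n", hardened_copy_from_user(dst, NULL, sizeof(dst), 4));
        return 0;
    }
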
20091diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20092index 3c03a5d..edb68ae 100644
20093--- a/arch/x86/include/asm/uaccess_32.h
20094+++ b/arch/x86/include/asm/uaccess_32.h
20095@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20096 * anything, so this is accurate.
20097 */
20098
20099-static __always_inline unsigned long __must_check
20100+static __always_inline __size_overflow(3) unsigned long __must_check
20101 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20102 {
20103+ if ((long)n < 0)
20104+ return n;
20105+
20106+ check_object_size(from, n, true);
20107+
20108 if (__builtin_constant_p(n)) {
20109 unsigned long ret;
20110
20111@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20112 __copy_to_user(void __user *to, const void *from, unsigned long n)
20113 {
20114 might_fault();
20115+
20116 return __copy_to_user_inatomic(to, from, n);
20117 }
20118
20119-static __always_inline unsigned long
20120+static __always_inline __size_overflow(3) unsigned long
20121 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20122 {
20123+ if ((long)n < 0)
20124+ return n;
20125+
20126 /* Avoid zeroing the tail if the copy fails..
20127 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20128 * but as the zeroing behaviour is only significant when n is not
20129@@ -137,6 +146,12 @@ static __always_inline unsigned long
20130 __copy_from_user(void *to, const void __user *from, unsigned long n)
20131 {
20132 might_fault();
20133+
20134+ if ((long)n < 0)
20135+ return n;
20136+
20137+ check_object_size(to, n, false);
20138+
20139 if (__builtin_constant_p(n)) {
20140 unsigned long ret;
20141
20142@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20143 const void __user *from, unsigned long n)
20144 {
20145 might_fault();
20146+
20147+ if ((long)n < 0)
20148+ return n;
20149+
20150 if (__builtin_constant_p(n)) {
20151 unsigned long ret;
20152
20153@@ -181,7 +200,10 @@ static __always_inline unsigned long
20154 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20155 unsigned long n)
20156 {
20157- return __copy_from_user_ll_nocache_nozero(to, from, n);
20158+ if ((long)n < 0)
20159+ return n;
20160+
20161+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20162 }
20163
20164 #endif /* _ASM_X86_UACCESS_32_H */
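
Every 32-bit helper above gains the same early exit, if ((long)n < 0) return n, before any user memory is touched: a length with the top bit set can realistically only come from a signedness bug, for instance a negative error code used as a count, so it is reported back as wholly uncopied. A toy illustration of what the cast catches:

    #include <stdio.h>

    static unsigned long guarded_copy(unsigned long n)
    {
        if ((long)n < 0)    /* top bit set: implausible length, refuse */
            return n;       /* report every byte as uncopied */
        return 0;           /* pretend the copy succeeded */
    }

    int main(void)
    {
        int broken_len = -1;   /* e.g. an error code misused as a count */
        printf("%lu\n", guarded_copy((unsigned long)(long)broken_len));
        printf("%lu\n", guarded_copy(16));
        return 0;
    }
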
20165diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20166index 12a26b9..c36fff5 100644
20167--- a/arch/x86/include/asm/uaccess_64.h
20168+++ b/arch/x86/include/asm/uaccess_64.h
20169@@ -10,6 +10,9 @@
20170 #include <asm/alternative.h>
20171 #include <asm/cpufeature.h>
20172 #include <asm/page.h>
20173+#include <asm/pgtable.h>
20174+
20175+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20176
20177 /*
20178 * Copy To/From Userspace
20179@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20180 __must_check unsigned long
20181 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20182
20183-static __always_inline __must_check unsigned long
20184-copy_user_generic(void *to, const void *from, unsigned len)
20185+static __always_inline __must_check __size_overflow(3) unsigned long
20186+copy_user_generic(void *to, const void *from, unsigned long len)
20187 {
20188 unsigned ret;
20189
20190@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20191 }
20192
20193 __must_check unsigned long
20194-copy_in_user(void __user *to, const void __user *from, unsigned len);
20195+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20196
20197 static __always_inline __must_check
20198-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20199+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20200 {
20201- int ret = 0;
20202+ size_t sz = __compiletime_object_size(dst);
20203+ unsigned ret = 0;
20204+
20205+ if (size > INT_MAX)
20206+ return size;
20207+
20208+ check_object_size(dst, size, false);
20209+
20210+#ifdef CONFIG_PAX_MEMORY_UDEREF
20211+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20212+ return size;
20213+#endif
20214+
20215+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20216+ if(__builtin_constant_p(size))
20217+ copy_from_user_overflow();
20218+ else
20219+ __copy_from_user_overflow(sz, size);
20220+ return size;
20221+ }
20222
20223 if (!__builtin_constant_p(size))
20224- return copy_user_generic(dst, (__force void *)src, size);
20225+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20226 switch (size) {
20227- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20228+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20229 ret, "b", "b", "=q", 1);
20230 return ret;
20231- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20232+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20233 ret, "w", "w", "=r", 2);
20234 return ret;
20235- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20236+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20237 ret, "l", "k", "=r", 4);
20238 return ret;
20239- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20240+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20241 ret, "q", "", "=r", 8);
20242 return ret;
20243 case 10:
20244- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20245+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20246 ret, "q", "", "=r", 10);
20247 if (unlikely(ret))
20248 return ret;
20249 __get_user_asm(*(u16 *)(8 + (char *)dst),
20250- (u16 __user *)(8 + (char __user *)src),
20251+ (const u16 __user *)(8 + (const char __user *)src),
20252 ret, "w", "w", "=r", 2);
20253 return ret;
20254 case 16:
20255- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20256+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20257 ret, "q", "", "=r", 16);
20258 if (unlikely(ret))
20259 return ret;
20260 __get_user_asm(*(u64 *)(8 + (char *)dst),
20261- (u64 __user *)(8 + (char __user *)src),
20262+ (const u64 __user *)(8 + (const char __user *)src),
20263 ret, "q", "", "=r", 8);
20264 return ret;
20265 default:
20266- return copy_user_generic(dst, (__force void *)src, size);
20267+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20268 }
20269 }
20270
20271 static __always_inline __must_check
20272-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20273+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20274 {
20275 might_fault();
20276 return __copy_from_user_nocheck(dst, src, size);
20277 }
20278
20279 static __always_inline __must_check
20280-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20281+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20282 {
20283- int ret = 0;
20284+ size_t sz = __compiletime_object_size(src);
20285+ unsigned ret = 0;
20286+
20287+ if (size > INT_MAX)
20288+ return size;
20289+
20290+ check_object_size(src, size, true);
20291+
20292+#ifdef CONFIG_PAX_MEMORY_UDEREF
20293+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20294+ return size;
20295+#endif
20296+
20297+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20298+ if(__builtin_constant_p(size))
20299+ copy_to_user_overflow();
20300+ else
20301+ __copy_to_user_overflow(sz, size);
20302+ return size;
20303+ }
20304
20305 if (!__builtin_constant_p(size))
20306- return copy_user_generic((__force void *)dst, src, size);
20307+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20308 switch (size) {
20309- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20310+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20311 ret, "b", "b", "iq", 1);
20312 return ret;
20313- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20314+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20315 ret, "w", "w", "ir", 2);
20316 return ret;
20317- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20318+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20319 ret, "l", "k", "ir", 4);
20320 return ret;
20321- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20322+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20323 ret, "q", "", "er", 8);
20324 return ret;
20325 case 10:
20326- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20327+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20328 ret, "q", "", "er", 10);
20329 if (unlikely(ret))
20330 return ret;
20331 asm("":::"memory");
20332- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20333+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20334 ret, "w", "w", "ir", 2);
20335 return ret;
20336 case 16:
20337- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20338+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20339 ret, "q", "", "er", 16);
20340 if (unlikely(ret))
20341 return ret;
20342 asm("":::"memory");
20343- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20344+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20345 ret, "q", "", "er", 8);
20346 return ret;
20347 default:
20348- return copy_user_generic((__force void *)dst, src, size);
20349+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20350 }
20351 }
20352
20353 static __always_inline __must_check
20354-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20355+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20356 {
20357 might_fault();
20358 return __copy_to_user_nocheck(dst, src, size);
20359 }
20360
20361 static __always_inline __must_check
20362-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20363+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20364 {
20365- int ret = 0;
20366+ unsigned ret = 0;
20367
20368 might_fault();
20369+
20370+ if (size > INT_MAX)
20371+ return size;
20372+
20373+#ifdef CONFIG_PAX_MEMORY_UDEREF
20374+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20375+ return size;
20376+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20377+ return size;
20378+#endif
20379+
20380 if (!__builtin_constant_p(size))
20381- return copy_user_generic((__force void *)dst,
20382- (__force void *)src, size);
20383+ return copy_user_generic((__force_kernel void *)____m(dst),
20384+ (__force_kernel const void *)____m(src), size);
20385 switch (size) {
20386 case 1: {
20387 u8 tmp;
20388- __get_user_asm(tmp, (u8 __user *)src,
20389+ __get_user_asm(tmp, (const u8 __user *)src,
20390 ret, "b", "b", "=q", 1);
20391 if (likely(!ret))
20392 __put_user_asm(tmp, (u8 __user *)dst,
20393@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20394 }
20395 case 2: {
20396 u16 tmp;
20397- __get_user_asm(tmp, (u16 __user *)src,
20398+ __get_user_asm(tmp, (const u16 __user *)src,
20399 ret, "w", "w", "=r", 2);
20400 if (likely(!ret))
20401 __put_user_asm(tmp, (u16 __user *)dst,
20402@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20403
20404 case 4: {
20405 u32 tmp;
20406- __get_user_asm(tmp, (u32 __user *)src,
20407+ __get_user_asm(tmp, (const u32 __user *)src,
20408 ret, "l", "k", "=r", 4);
20409 if (likely(!ret))
20410 __put_user_asm(tmp, (u32 __user *)dst,
20411@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20412 }
20413 case 8: {
20414 u64 tmp;
20415- __get_user_asm(tmp, (u64 __user *)src,
20416+ __get_user_asm(tmp, (const u64 __user *)src,
20417 ret, "q", "", "=r", 8);
20418 if (likely(!ret))
20419 __put_user_asm(tmp, (u64 __user *)dst,
20420@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20421 return ret;
20422 }
20423 default:
20424- return copy_user_generic((__force void *)dst,
20425- (__force void *)src, size);
20426+ return copy_user_generic((__force_kernel void *)____m(dst),
20427+ (__force_kernel const void *)____m(src), size);
20428 }
20429 }
20430
20431-static __must_check __always_inline int
20432-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20433+static __must_check __always_inline unsigned long
20434+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20435 {
20436 return __copy_from_user_nocheck(dst, src, size);
20437 }
20438
20439-static __must_check __always_inline int
20440-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20441+static __must_check __always_inline unsigned long
20442+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20443 {
20444 return __copy_to_user_nocheck(dst, src, size);
20445 }
20446
20447-extern long __copy_user_nocache(void *dst, const void __user *src,
20448- unsigned size, int zerorest);
20449+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20450+ unsigned long size, int zerorest);
20451
20452-static inline int
20453-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20454+static inline unsigned long
20455+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20456 {
20457 might_fault();
20458+
20459+ if (size > INT_MAX)
20460+ return size;
20461+
20462+#ifdef CONFIG_PAX_MEMORY_UDEREF
20463+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20464+ return size;
20465+#endif
20466+
20467 return __copy_user_nocache(dst, src, size, 1);
20468 }
20469
20470-static inline int
20471+static inline unsigned long
20472 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20473- unsigned size)
20474+ unsigned long size)
20475 {
20476+ if (size > INT_MAX)
20477+ return size;
20478+
20479+#ifdef CONFIG_PAX_MEMORY_UDEREF
20480+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20481+ return size;
20482+#endif
20483+
20484 return __copy_user_nocache(dst, src, size, 0);
20485 }
20486
20487 unsigned long
20488-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20489+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20490
20491 #endif /* _ASM_X86_UACCESS_64_H */
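
The 64-bit helpers now funnel every raw user pointer through ____m(), defined in the uaccess.h hunk earlier in this patch: under PAX_MEMORY_UDEREF on amd64, userland is only reachable through a mapping shifted up by pax_user_shadow_base, so addresses below that base are rebased before the kernel touches them. A standalone model of that rebase; the base value here is invented for the demo:

    #include <stdio.h>

    static unsigned long pax_user_shadow_base = 0x10000UL;  /* invented */

    static void *rebase_user_ptr(void *p)          /* models ____m(x) */
    {
        unsigned long x = (unsigned long)p;
        if (x < pax_user_shadow_base)              /* raw userland address */
            x += pax_user_shadow_base;             /* lift into the shadow */
        return (void *)x;                          /* kernel ptrs unchanged */
    }

    int main(void)
    {
        printf("%p\n", rebase_user_ptr((void *)0x1234UL));   /* rebased */
        printf("%p\n", rebase_user_ptr((void *)0x20000UL));  /* unchanged */
        return 0;
    }
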
20492diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20493index 5b238981..77fdd78 100644
20494--- a/arch/x86/include/asm/word-at-a-time.h
20495+++ b/arch/x86/include/asm/word-at-a-time.h
20496@@ -11,7 +11,7 @@
20497 * and shift, for example.
20498 */
20499 struct word_at_a_time {
20500- const unsigned long one_bits, high_bits;
20501+ unsigned long one_bits, high_bits;
20502 };
20503
20504 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20505diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20506index f58a9c7..dc378042a 100644
20507--- a/arch/x86/include/asm/x86_init.h
20508+++ b/arch/x86/include/asm/x86_init.h
20509@@ -129,7 +129,7 @@ struct x86_init_ops {
20510 struct x86_init_timers timers;
20511 struct x86_init_iommu iommu;
20512 struct x86_init_pci pci;
20513-};
20514+} __no_const;
20515
20516 /**
20517 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20518@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20519 void (*setup_percpu_clockev)(void);
20520 void (*early_percpu_clock_init)(void);
20521 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20522-};
20523+} __no_const;
20524
20525 struct timespec;
20526
20527@@ -168,7 +168,7 @@ struct x86_platform_ops {
20528 void (*save_sched_clock_state)(void);
20529 void (*restore_sched_clock_state)(void);
20530 void (*apic_post_init)(void);
20531-};
20532+} __no_const;
20533
20534 struct pci_dev;
20535 struct msi_msg;
20536@@ -182,7 +182,7 @@ struct x86_msi_ops {
20537 void (*teardown_msi_irqs)(struct pci_dev *dev);
20538 void (*restore_msi_irqs)(struct pci_dev *dev);
20539 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20540-};
20541+} __no_const;
20542
20543 struct IO_APIC_route_entry;
20544 struct io_apic_irq_attr;
20545@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20546 unsigned int destination, int vector,
20547 struct io_apic_irq_attr *attr);
20548 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20549-};
20550+} __no_const;
20551
20552 extern struct x86_init_ops x86_init;
20553 extern struct x86_cpuinit_ops x86_cpuinit;
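
__no_const is a grsecurity annotation consumed by the constify gcc plugin: structures consisting purely of function pointers are normally forced const at build time, and these x86 ops tables opt out because the kernel legitimately rewrites them at runtime. The point of constifying the rest is that a long-lived writable function-pointer table is a classic kernel exploitation target. A userspace illustration of the protected shape:

    #include <stdio.h>

    struct demo_ops {
        void (*greet)(void);
    };

    static void hello(void) { puts("hello"); }

    /* const places the table in .rodata; a runtime write would fault */
    static const struct demo_ops ops = { .greet = hello };

    int main(void)
    {
        ops.greet();
        /* ops.greet = evil_fn;  <- rejected at compile time */
        return 0;
    }
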
20554diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20555index 5eea099..ff7ef8d 100644
20556--- a/arch/x86/include/asm/xen/page.h
20557+++ b/arch/x86/include/asm/xen/page.h
20558@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20559 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20560 * cases needing an extended handling.
20561 */
20562-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20563+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20564 {
20565 unsigned long mfn;
20566
20567diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20568index c9a6d68..cb57f42 100644
20569--- a/arch/x86/include/asm/xsave.h
20570+++ b/arch/x86/include/asm/xsave.h
20571@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20572 if (unlikely(err))
20573 return -EFAULT;
20574
20575+ pax_open_userland();
20576 __asm__ __volatile__(ASM_STAC "\n"
20577- "1:"XSAVE"\n"
20578+ "1:"
20579+ __copyuser_seg
20580+ XSAVE"\n"
20581 "2: " ASM_CLAC "\n"
20582 xstate_fault
20583 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20584 : "memory");
20585+ pax_close_userland();
20586 return err;
20587 }
20588
20589@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20590 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20591 {
20592 int err = 0;
20593- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20594+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20595 u32 lmask = mask;
20596 u32 hmask = mask >> 32;
20597
20598+ pax_open_userland();
20599 __asm__ __volatile__(ASM_STAC "\n"
20600- "1:"XRSTOR"\n"
20601+ "1:"
20602+ __copyuser_seg
20603+ XRSTOR"\n"
20604 "2: " ASM_CLAC "\n"
20605 xstate_fault
20606 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20607 : "memory"); /* memory required? */
20608+ pax_close_userland();
20609 return err;
20610 }
20611
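
pax_open_userland()/pax_close_userland() bracket the XSAVE/XRSTOR asm the same way ASM_STAC/ASM_CLAC already do for SMAP: under UDEREF user memory is unreachable by default and is opened only across the instructions that must touch it. A stub-level sketch of the bracket discipline (the primitives below are stand-ins, not the real implementation):

    #include <stdio.h>

    static void pax_open_userland(void)  { puts("userland: reachable"); }
    static void pax_close_userland(void) { puts("userland: sealed");    }

    static int xsave_user_sketch(void)
    {
        int err = 0;
        pax_open_userland();
        /* real code: ASM_STAC, then XSAVE to the __user buffer, ASM_CLAC */
        puts("xsave to user buffer");
        pax_close_userland();
        return err;
    }

    int main(void) { return xsave_user_sketch(); }
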
20612diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20613index d993e33..8db1b18 100644
20614--- a/arch/x86/include/uapi/asm/e820.h
20615+++ b/arch/x86/include/uapi/asm/e820.h
20616@@ -58,7 +58,7 @@ struct e820map {
20617 #define ISA_START_ADDRESS 0xa0000
20618 #define ISA_END_ADDRESS 0x100000
20619
20620-#define BIOS_BEGIN 0x000a0000
20621+#define BIOS_BEGIN 0x000c0000
20622 #define BIOS_END 0x00100000
20623
20624 #define BIOS_ROM_BASE 0xffe00000
20625diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20626index 7b0a55a..ad115bf 100644
20627--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20628+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20629@@ -49,7 +49,6 @@
20630 #define EFLAGS 144
20631 #define RSP 152
20632 #define SS 160
20633-#define ARGOFFSET R11
20634 #endif /* __ASSEMBLY__ */
20635
20636 /* top of stack page */
20637diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20638index 5d4502c..a567e09 100644
20639--- a/arch/x86/kernel/Makefile
20640+++ b/arch/x86/kernel/Makefile
20641@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20642 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20643 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20644 obj-y += probe_roms.o
20645-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20646+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20647 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20648 obj-$(CONFIG_X86_64) += mcount_64.o
20649 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20650diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20651index b5ddc96..490b4e4 100644
20652--- a/arch/x86/kernel/acpi/boot.c
20653+++ b/arch/x86/kernel/acpi/boot.c
20654@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20655 * If your system is blacklisted here, but you find that acpi=force
20656 * works for you, please contact linux-acpi@vger.kernel.org
20657 */
20658-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20659+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20660 /*
20661 * Boxes that need ACPI disabled
20662 */
20663@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20664 };
20665
20666 /* second table for DMI checks that should run after early-quirks */
20667-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20668+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20669 /*
20670 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20671 * which includes some code which overrides all temperature
20672diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20673index 3136820..e2c6577 100644
20674--- a/arch/x86/kernel/acpi/sleep.c
20675+++ b/arch/x86/kernel/acpi/sleep.c
20676@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20677 #else /* CONFIG_64BIT */
20678 #ifdef CONFIG_SMP
20679 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20680+
20681+ pax_open_kernel();
20682 early_gdt_descr.address =
20683 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20684+ pax_close_kernel();
20685+
20686 initial_gs = per_cpu_offset(smp_processor_id());
20687 #endif
20688 initial_code = (unsigned long)wakeup_long64;
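
pax_open_kernel()/pax_close_kernel() wrap the one legitimate write to the GDT descriptor, which KERNEXEC otherwise keeps read-only. A userspace analogue of that discipline, with mprotect() standing in for the real mechanism of lifting write protection for just the duration of the update:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        strcpy(p, "boot-time value");
        mprotect(p, pagesz, PROT_READ);                /* steady state: RO */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
        strcpy(p, "updated value");                    /* the one legit write */
        mprotect(p, pagesz, PROT_READ);                /* pax_close_kernel() */

        puts(p);
        return 0;
    }
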
20689diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20690index 665c6b7..eae4d56 100644
20691--- a/arch/x86/kernel/acpi/wakeup_32.S
20692+++ b/arch/x86/kernel/acpi/wakeup_32.S
20693@@ -29,13 +29,11 @@ wakeup_pmode_return:
20694 # and restore the stack ... but you need gdt for this to work
20695 movl saved_context_esp, %esp
20696
20697- movl %cs:saved_magic, %eax
20698- cmpl $0x12345678, %eax
20699+ cmpl $0x12345678, saved_magic
20700 jne bogus_magic
20701
20702 # jump to place where we left off
20703- movl saved_eip, %eax
20704- jmp *%eax
20705+ jmp *(saved_eip)
20706
20707 bogus_magic:
20708 jmp bogus_magic
20709diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20710index 703130f..27a155d 100644
20711--- a/arch/x86/kernel/alternative.c
20712+++ b/arch/x86/kernel/alternative.c
20713@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20714 */
20715 for (a = start; a < end; a++) {
20716 instr = (u8 *)&a->instr_offset + a->instr_offset;
20717+
20718+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20719+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20720+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20721+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20722+#endif
20723+
20724 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20725 BUG_ON(a->replacementlen > a->instrlen);
20726 BUG_ON(a->instrlen > sizeof(insnbuf));
20727@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20728 add_nops(insnbuf + a->replacementlen,
20729 a->instrlen - a->replacementlen);
20730
20731+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20732+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20733+ instr = ktva_ktla(instr);
20734+#endif
20735+
20736 text_poke_early(instr, insnbuf, a->instrlen);
20737 }
20738 }
20739@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20740 for (poff = start; poff < end; poff++) {
20741 u8 *ptr = (u8 *)poff + *poff;
20742
20743+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20744+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20745+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20746+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20747+#endif
20748+
20749 if (!*poff || ptr < text || ptr >= text_end)
20750 continue;
20751 /* turn DS segment override prefix into lock prefix */
20752- if (*ptr == 0x3e)
20753+ if (*ktla_ktva(ptr) == 0x3e)
20754 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20755 }
20756 mutex_unlock(&text_mutex);
20757@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20758 for (poff = start; poff < end; poff++) {
20759 u8 *ptr = (u8 *)poff + *poff;
20760
20761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20762+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20763+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20764+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20765+#endif
20766+
20767 if (!*poff || ptr < text || ptr >= text_end)
20768 continue;
20769 /* turn lock prefix into DS segment override prefix */
20770- if (*ptr == 0xf0)
20771+ if (*ktla_ktva(ptr) == 0xf0)
20772 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20773 }
20774 mutex_unlock(&text_mutex);
20775@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20776
20777 BUG_ON(p->len > MAX_PATCH_LEN);
20778 /* prep the buffer with the original instructions */
20779- memcpy(insnbuf, p->instr, p->len);
20780+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20781 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20782 (unsigned long)p->instr, p->len);
20783
20784@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20785 if (!uniproc_patched || num_possible_cpus() == 1)
20786 free_init_pages("SMP alternatives",
20787 (unsigned long)__smp_locks,
20788- (unsigned long)__smp_locks_end);
20789+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20790 #endif
20791
20792 apply_paravirt(__parainstructions, __parainstructions_end);
20793@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20794 * instructions. And on the local CPU you need to be protected again NMI or MCE
20795 * handlers seeing an inconsistent instruction while you patch.
20796 */
20797-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20798+void *__kprobes text_poke_early(void *addr, const void *opcode,
20799 size_t len)
20800 {
20801 unsigned long flags;
20802 local_irq_save(flags);
20803- memcpy(addr, opcode, len);
20804+
20805+ pax_open_kernel();
20806+ memcpy(ktla_ktva(addr), opcode, len);
20807 sync_core();
20808+ pax_close_kernel();
20809+
20810 local_irq_restore(flags);
20811 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20812 that causes hangs on some VIA CPUs. */
20813@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20814 */
20815 void *text_poke(void *addr, const void *opcode, size_t len)
20816 {
20817- unsigned long flags;
20818- char *vaddr;
20819+ unsigned char *vaddr = ktla_ktva(addr);
20820 struct page *pages[2];
20821- int i;
20822+ size_t i;
20823
20824 if (!core_kernel_text((unsigned long)addr)) {
20825- pages[0] = vmalloc_to_page(addr);
20826- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20827+ pages[0] = vmalloc_to_page(vaddr);
20828+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20829 } else {
20830- pages[0] = virt_to_page(addr);
20831+ pages[0] = virt_to_page(vaddr);
20832 WARN_ON(!PageReserved(pages[0]));
20833- pages[1] = virt_to_page(addr + PAGE_SIZE);
20834+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20835 }
20836 BUG_ON(!pages[0]);
20837- local_irq_save(flags);
20838- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20839- if (pages[1])
20840- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20841- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20842- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20843- clear_fixmap(FIX_TEXT_POKE0);
20844- if (pages[1])
20845- clear_fixmap(FIX_TEXT_POKE1);
20846- local_flush_tlb();
20847- sync_core();
20848- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20849- that causes hangs on some VIA CPUs. */
20850+ text_poke_early(addr, opcode, len);
20851 for (i = 0; i < len; i++)
20852- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20853- local_irq_restore(flags);
20854+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20855 return addr;
20856 }
20857
20858@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20859 if (likely(!bp_patching_in_progress))
20860 return 0;
20861
20862- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20863+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20864 return 0;
20865
20866 /* set up the specified breakpoint handler */
20867@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20868 */
20869 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20870 {
20871- unsigned char int3 = 0xcc;
20872+ const unsigned char int3 = 0xcc;
20873
20874 bp_int3_handler = handler;
20875 bp_int3_addr = (u8 *)addr + sizeof(int3);
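
The alternative.c changes route all code patching through ktla_ktva(): under i386 KERNEXEC the kernel text is reachable at two addresses, the executable mapping and an alias used for reads and (with pax_open_kernel()) writes, separated by a constant offset. A minimal model of the translation pair; the offset is invented for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define KERNEXEC_BIAS 0x00400000UL   /* demo-only constant */

    static uintptr_t ktla_ktva_sketch(uintptr_t la) { return la + KERNEXEC_BIAS; }
    static uintptr_t ktva_ktla_sketch(uintptr_t va) { return va - KERNEXEC_BIAS; }

    int main(void)
    {
        uintptr_t text  = 0xc1000000UL;  /* pretend kernel text address */
        uintptr_t alias = ktla_ktva_sketch(text);

        printf("alias: %#lx\n", (unsigned long)alias);
        printf("back:  %#lx\n", (unsigned long)ktva_ktla_sketch(alias));
        return 0;
    }
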
20876diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20877index 29b5b18..3bdfc29 100644
20878--- a/arch/x86/kernel/apic/apic.c
20879+++ b/arch/x86/kernel/apic/apic.c
20880@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20881 /*
20882 * Debug level, exported for io_apic.c
20883 */
20884-unsigned int apic_verbosity;
20885+int apic_verbosity;
20886
20887 int pic_mode;
20888
20889@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20890 apic_write(APIC_ESR, 0);
20891 v = apic_read(APIC_ESR);
20892 ack_APIC_irq();
20893- atomic_inc(&irq_err_count);
20894+ atomic_inc_unchecked(&irq_err_count);
20895
20896 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20897 smp_processor_id(), v);
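
atomic_inc_unchecked() is the PaX REFCOUNT escape hatch: ordinary atomic_t increments are instrumented to trap on overflow, so counters that are pure statistics and may wrap harmlessly, like irq_err_count here, are converted to the unchecked variant. Modeled below with C11 atomics and an abort() in place of the real trap:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    typedef atomic_int atomic_unchecked_t;      /* wraps silently */

    static void atomic_inc_checked(atomic_int *v)
    {
        if (atomic_fetch_add(v, 1) == INT_MAX)  /* it just wrapped */
            abort();                            /* PaX would log and kill */
    }

    int main(void)
    {
        atomic_unchecked_t err_count = INT_MAX; /* pure statistic */
        atomic_fetch_add(&err_count, 1);        /* harmless wrap, no trap */

        atomic_int refcount = 0;                /* lifetime-critical counter */
        atomic_inc_checked(&refcount);          /* trapped if it ever wraps */

        printf("%d %d\n", atomic_load(&err_count), atomic_load(&refcount));
        return 0;
    }
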
20898diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20899index de918c4..32eed23 100644
20900--- a/arch/x86/kernel/apic/apic_flat_64.c
20901+++ b/arch/x86/kernel/apic/apic_flat_64.c
20902@@ -154,7 +154,7 @@ static int flat_probe(void)
20903 return 1;
20904 }
20905
20906-static struct apic apic_flat = {
20907+static struct apic apic_flat __read_only = {
20908 .name = "flat",
20909 .probe = flat_probe,
20910 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20911@@ -260,7 +260,7 @@ static int physflat_probe(void)
20912 return 0;
20913 }
20914
20915-static struct apic apic_physflat = {
20916+static struct apic apic_physflat __read_only = {
20917
20918 .name = "physical flat",
20919 .probe = physflat_probe,
20920diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20921index b205cdb..d8503ff 100644
20922--- a/arch/x86/kernel/apic/apic_noop.c
20923+++ b/arch/x86/kernel/apic/apic_noop.c
20924@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20925 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20926 }
20927
20928-struct apic apic_noop = {
20929+struct apic apic_noop __read_only = {
20930 .name = "noop",
20931 .probe = noop_probe,
20932 .acpi_madt_oem_check = NULL,
20933diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20934index c4a8d63..fe893ac 100644
20935--- a/arch/x86/kernel/apic/bigsmp_32.c
20936+++ b/arch/x86/kernel/apic/bigsmp_32.c
20937@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20938 return dmi_bigsmp;
20939 }
20940
20941-static struct apic apic_bigsmp = {
20942+static struct apic apic_bigsmp __read_only = {
20943
20944 .name = "bigsmp",
20945 .probe = probe_bigsmp,
20946diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20947index 3f5f604..309c0e6 100644
20948--- a/arch/x86/kernel/apic/io_apic.c
20949+++ b/arch/x86/kernel/apic/io_apic.c
20950@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20951 return ret;
20952 }
20953
20954-atomic_t irq_mis_count;
20955+atomic_unchecked_t irq_mis_count;
20956
20957 #ifdef CONFIG_GENERIC_PENDING_IRQ
20958 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20959@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20960 * at the cpu.
20961 */
20962 if (!(v & (1 << (i & 0x1f)))) {
20963- atomic_inc(&irq_mis_count);
20964+ atomic_inc_unchecked(&irq_mis_count);
20965
20966 eoi_ioapic_irq(irq, cfg);
20967 }
20968diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20969index bda4886..f9c7195 100644
20970--- a/arch/x86/kernel/apic/probe_32.c
20971+++ b/arch/x86/kernel/apic/probe_32.c
20972@@ -72,7 +72,7 @@ static int probe_default(void)
20973 return 1;
20974 }
20975
20976-static struct apic apic_default = {
20977+static struct apic apic_default __read_only = {
20978
20979 .name = "default",
20980 .probe = probe_default,
20981diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20982index 6cedd79..023ff8e 100644
20983--- a/arch/x86/kernel/apic/vector.c
20984+++ b/arch/x86/kernel/apic/vector.c
20985@@ -21,7 +21,7 @@
20986
20987 static DEFINE_RAW_SPINLOCK(vector_lock);
20988
20989-void lock_vector_lock(void)
20990+void lock_vector_lock(void) __acquires(vector_lock)
20991 {
20992 /* Used so that the online set of cpus does not change
20993 * during assign_irq_vector.
20994@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20995 raw_spin_lock(&vector_lock);
20996 }
20997
20998-void unlock_vector_lock(void)
20999+void unlock_vector_lock(void) __releases(vector_lock)
21000 {
21001 raw_spin_unlock(&vector_lock);
21002 }
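
The __acquires()/__releases() markers added to the vector_lock helpers are sparse context annotations, not executable code: a `make C=1` build checks that every code path balances its lock operations, and a normal build compiles them away. Their definitions in the kernel's compiler headers are essentially:

    /* roughly how include/linux/compiler.h wires these up */
    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif
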
21003diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21004index e658f21..b695a1a 100644
21005--- a/arch/x86/kernel/apic/x2apic_cluster.c
21006+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21007@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21008 return notifier_from_errno(err);
21009 }
21010
21011-static struct notifier_block __refdata x2apic_cpu_notifier = {
21012+static struct notifier_block x2apic_cpu_notifier = {
21013 .notifier_call = update_clusterinfo,
21014 };
21015
21016@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21017 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21018 }
21019
21020-static struct apic apic_x2apic_cluster = {
21021+static struct apic apic_x2apic_cluster __read_only = {
21022
21023 .name = "cluster x2apic",
21024 .probe = x2apic_cluster_probe,
21025diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21026index 6fae733..5ca17af 100644
21027--- a/arch/x86/kernel/apic/x2apic_phys.c
21028+++ b/arch/x86/kernel/apic/x2apic_phys.c
21029@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21030 return apic == &apic_x2apic_phys;
21031 }
21032
21033-static struct apic apic_x2apic_phys = {
21034+static struct apic apic_x2apic_phys __read_only = {
21035
21036 .name = "physical x2apic",
21037 .probe = x2apic_phys_probe,
21038diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21039index 8e9dcfd..c61b3e4 100644
21040--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21041+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21042@@ -348,7 +348,7 @@ static int uv_probe(void)
21043 return apic == &apic_x2apic_uv_x;
21044 }
21045
21046-static struct apic __refdata apic_x2apic_uv_x = {
21047+static struct apic apic_x2apic_uv_x __read_only = {
21048
21049 .name = "UV large system",
21050 .probe = uv_probe,
21051diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21052index 927ec92..0dc3bd4 100644
21053--- a/arch/x86/kernel/apm_32.c
21054+++ b/arch/x86/kernel/apm_32.c
21055@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
21056 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21057 * even though they are called in protected mode.
21058 */
21059-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21060+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21061 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21062
21063 static const char driver_version[] = "1.16ac"; /* no spaces */
21064@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
21065 BUG_ON(cpu != 0);
21066 gdt = get_cpu_gdt_table(cpu);
21067 save_desc_40 = gdt[0x40 / 8];
21068+
21069+ pax_open_kernel();
21070 gdt[0x40 / 8] = bad_bios_desc;
21071+ pax_close_kernel();
21072
21073 apm_irq_save(flags);
21074 APM_DO_SAVE_SEGS;
21075@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21076 &call->esi);
21077 APM_DO_RESTORE_SEGS;
21078 apm_irq_restore(flags);
21079+
21080+ pax_open_kernel();
21081 gdt[0x40 / 8] = save_desc_40;
21082+ pax_close_kernel();
21083+
21084 put_cpu();
21085
21086 return call->eax & 0xff;
21087@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21088 BUG_ON(cpu != 0);
21089 gdt = get_cpu_gdt_table(cpu);
21090 save_desc_40 = gdt[0x40 / 8];
21091+
21092+ pax_open_kernel();
21093 gdt[0x40 / 8] = bad_bios_desc;
21094+ pax_close_kernel();
21095
21096 apm_irq_save(flags);
21097 APM_DO_SAVE_SEGS;
21098@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21099 &call->eax);
21100 APM_DO_RESTORE_SEGS;
21101 apm_irq_restore(flags);
21102+
21103+ pax_open_kernel();
21104 gdt[0x40 / 8] = save_desc_40;
21105+ pax_close_kernel();
21106+
21107 put_cpu();
21108 return error;
21109 }
21110@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21111 * code to that CPU.
21112 */
21113 gdt = get_cpu_gdt_table(0);
21114+
21115+ pax_open_kernel();
21116 set_desc_base(&gdt[APM_CS >> 3],
21117 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21118 set_desc_base(&gdt[APM_CS_16 >> 3],
21119 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21120 set_desc_base(&gdt[APM_DS >> 3],
21121 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21122+ pax_close_kernel();
21123
21124 proc_create("apm", 0, NULL, &apm_file_ops);
21125
21126diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21127index 9f6b934..cf5ffb3 100644
21128--- a/arch/x86/kernel/asm-offsets.c
21129+++ b/arch/x86/kernel/asm-offsets.c
21130@@ -32,6 +32,8 @@ void common(void) {
21131 OFFSET(TI_flags, thread_info, flags);
21132 OFFSET(TI_status, thread_info, status);
21133 OFFSET(TI_addr_limit, thread_info, addr_limit);
21134+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21135+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21136
21137 BLANK();
21138 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21139@@ -52,8 +54,26 @@ void common(void) {
21140 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21141 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21142 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21143+
21144+#ifdef CONFIG_PAX_KERNEXEC
21145+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21146 #endif
21147
21148+#ifdef CONFIG_PAX_MEMORY_UDEREF
21149+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21150+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21151+#ifdef CONFIG_X86_64
21152+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21153+#endif
21154+#endif
21155+
21156+#endif
21157+
21158+ BLANK();
21159+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21160+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21161+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21162+
21163 #ifdef CONFIG_XEN
21164 BLANK();
21165 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21166diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21167index fdcbb4d..036dd93 100644
21168--- a/arch/x86/kernel/asm-offsets_64.c
21169+++ b/arch/x86/kernel/asm-offsets_64.c
21170@@ -80,6 +80,7 @@ int main(void)
21171 BLANK();
21172 #undef ENTRY
21173
21174+ DEFINE(TSS_size, sizeof(struct tss_struct));
21175 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21176 BLANK();
21177
21178diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21179index 80091ae..0c5184f 100644
21180--- a/arch/x86/kernel/cpu/Makefile
21181+++ b/arch/x86/kernel/cpu/Makefile
21182@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21183 CFLAGS_REMOVE_perf_event.o = -pg
21184 endif
21185
21186-# Make sure load_percpu_segment has no stackprotector
21187-nostackp := $(call cc-option, -fno-stack-protector)
21188-CFLAGS_common.o := $(nostackp)
21189-
21190 obj-y := intel_cacheinfo.o scattered.o topology.o
21191 obj-y += common.o
21192 obj-y += rdrand.o
21193diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21194index 15c5df9..d9a604a 100644
21195--- a/arch/x86/kernel/cpu/amd.c
21196+++ b/arch/x86/kernel/cpu/amd.c
21197@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21198 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21199 {
21200 /* AMD errata T13 (order #21922) */
21201- if ((c->x86 == 6)) {
21202+ if (c->x86 == 6) {
21203 /* Duron Rev A0 */
21204 if (c->x86_model == 3 && c->x86_mask == 0)
21205 size = 64;
21206diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21207index c604965..1558f4a 100644
21208--- a/arch/x86/kernel/cpu/common.c
21209+++ b/arch/x86/kernel/cpu/common.c
21210@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21211
21212 static const struct cpu_dev *this_cpu = &default_cpu;
21213
21214-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21215-#ifdef CONFIG_X86_64
21216- /*
21217- * We need valid kernel segments for data and code in long mode too
21218- * IRET will check the segment types kkeil 2000/10/28
21219- * Also sysret mandates a special GDT layout
21220- *
21221- * TLS descriptors are currently at a different place compared to i386.
21222- * Hopefully nobody expects them at a fixed place (Wine?)
21223- */
21224- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21225- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21226- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21227- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21228- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21229- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21230-#else
21231- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21232- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21233- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21234- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21235- /*
21236- * Segments used for calling PnP BIOS have byte granularity.
21237- * They code segments and data segments have fixed 64k limits,
21238- * the transfer segment sizes are set at run time.
21239- */
21240- /* 32-bit code */
21241- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21242- /* 16-bit code */
21243- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21244- /* 16-bit data */
21245- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21246- /* 16-bit data */
21247- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21248- /* 16-bit data */
21249- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21250- /*
21251- * The APM segments have byte granularity and their bases
21252- * are set at run time. All have 64k limits.
21253- */
21254- /* 32-bit code */
21255- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21256- /* 16-bit code */
21257- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21258- /* data */
21259- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21260-
21261- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21262- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21263- GDT_STACK_CANARY_INIT
21264-#endif
21265-} };
21266-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21267-
21268 static int __init x86_xsave_setup(char *s)
21269 {
21270 if (strlen(s))
21271@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21272 }
21273 }
21274
21275+#ifdef CONFIG_X86_64
21276+static __init int setup_disable_pcid(char *arg)
21277+{
21278+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21279+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21280+
21281+#ifdef CONFIG_PAX_MEMORY_UDEREF
21282+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21283+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21284+#endif
21285+
21286+ return 1;
21287+}
21288+__setup("nopcid", setup_disable_pcid);
21289+
21290+static void setup_pcid(struct cpuinfo_x86 *c)
21291+{
21292+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21293+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21294+
21295+#ifdef CONFIG_PAX_MEMORY_UDEREF
21296+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21297+ pax_open_kernel();
21298+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21299+ pax_close_kernel();
21300+ printk("PAX: slow and weak UDEREF enabled\n");
21301+ } else
21302+ printk("PAX: UDEREF disabled\n");
21303+#endif
21304+
21305+ return;
21306+ }
21307+
21308+ printk("PAX: PCID detected\n");
21309+ set_in_cr4(X86_CR4_PCIDE);
21310+
21311+#ifdef CONFIG_PAX_MEMORY_UDEREF
21312+ pax_open_kernel();
21313+ clone_pgd_mask = ~(pgdval_t)0UL;
21314+ pax_close_kernel();
21315+ if (pax_user_shadow_base)
21316+ printk("PAX: weak UDEREF enabled\n");
21317+ else {
21318+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21319+ printk("PAX: strong UDEREF enabled\n");
21320+ }
21321+#endif
21322+
21323+ if (cpu_has(c, X86_FEATURE_INVPCID))
21324+ printk("PAX: INVPCID detected\n");
21325+}
21326+#endif
21327+
21328 /*
21329 * Some CPU features depend on higher CPUID levels, which may not always
21330 * be available due to CPUID level capping or broken virtualization
21331@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21332 {
21333 struct desc_ptr gdt_descr;
21334
21335- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21336+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21337 gdt_descr.size = GDT_SIZE - 1;
21338 load_gdt(&gdt_descr);
21339 /* Reload the per-cpu base */
21340@@ -895,6 +894,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21341 setup_smep(c);
21342 setup_smap(c);
21343
21344+#ifdef CONFIG_X86_32
21345+#ifdef CONFIG_PAX_PAGEEXEC
21346+ if (!(__supported_pte_mask & _PAGE_NX))
21347+ clear_cpu_cap(c, X86_FEATURE_PSE);
21348+#endif
21349+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21350+ clear_cpu_cap(c, X86_FEATURE_SEP);
21351+#endif
21352+#endif
21353+
21354+#ifdef CONFIG_X86_64
21355+ setup_pcid(c);
21356+#endif
21357+
21358 /*
21359 * The vendor-specific functions might have changed features.
21360 * Now we do "generic changes."
21361@@ -977,7 +990,7 @@ static void syscall32_cpu_init(void)
21362 void enable_sep_cpu(void)
21363 {
21364 int cpu = get_cpu();
21365- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21366+ struct tss_struct *tss = init_tss + cpu;
21367
21368 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21369 put_cpu();
21370@@ -1115,14 +1128,16 @@ static __init int setup_disablecpuid(char *arg)
21371 }
21372 __setup("clearcpuid=", setup_disablecpuid);
21373
21374+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21375+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21376+
21377 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21378- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21379+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21380 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21381
21382 #ifdef CONFIG_X86_64
21383-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21384-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21385- (unsigned long) debug_idt_table };
21386+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21387+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21388
21389 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21390 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21391@@ -1299,7 +1314,7 @@ void cpu_init(void)
21392 */
21393 load_ucode_ap();
21394
21395- t = &per_cpu(init_tss, cpu);
21396+ t = init_tss + cpu;
21397 oist = &per_cpu(orig_ist, cpu);
21398
21399 #ifdef CONFIG_NUMA
21400@@ -1331,7 +1346,6 @@ void cpu_init(void)
21401 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21402 barrier();
21403
21404- x86_configure_nx();
21405 enable_x2apic();
21406
21407 /*
21408@@ -1383,7 +1397,7 @@ void cpu_init(void)
21409 {
21410 int cpu = smp_processor_id();
21411 struct task_struct *curr = current;
21412- struct tss_struct *t = &per_cpu(init_tss, cpu);
21413+ struct tss_struct *t = init_tss + cpu;
21414 struct thread_struct *thread = &curr->thread;
21415
21416 wait_for_master_cpu(cpu);
21417diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21418index c703507..28535e3 100644
21419--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21420+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21421@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21422 };
21423
21424 #ifdef CONFIG_AMD_NB
21425+static struct attribute *default_attrs_amd_nb[] = {
21426+ &type.attr,
21427+ &level.attr,
21428+ &coherency_line_size.attr,
21429+ &physical_line_partition.attr,
21430+ &ways_of_associativity.attr,
21431+ &number_of_sets.attr,
21432+ &size.attr,
21433+ &shared_cpu_map.attr,
21434+ &shared_cpu_list.attr,
21435+ NULL,
21436+ NULL,
21437+ NULL,
21438+ NULL
21439+};
21440+
21441 static struct attribute **amd_l3_attrs(void)
21442 {
21443 static struct attribute **attrs;
21444@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21445
21446 n = ARRAY_SIZE(default_attrs);
21447
21448- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21449- n += 2;
21450-
21451- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21452- n += 1;
21453-
21454- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21455- if (attrs == NULL)
21456- return attrs = default_attrs;
21457-
21458- for (n = 0; default_attrs[n]; n++)
21459- attrs[n] = default_attrs[n];
21460+ attrs = default_attrs_amd_nb;
21461
21462 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21463 attrs[n++] = &cache_disable_0.attr;
21464@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21465 .default_attrs = default_attrs,
21466 };
21467
21468+#ifdef CONFIG_AMD_NB
21469+static struct kobj_type ktype_cache_amd_nb = {
21470+ .sysfs_ops = &sysfs_ops,
21471+ .default_attrs = default_attrs_amd_nb,
21472+};
21473+#endif
21474+
21475 static struct kobj_type ktype_percpu_entry = {
21476 .sysfs_ops = &sysfs_ops,
21477 };
21478@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21479 return retval;
21480 }
21481
21482+#ifdef CONFIG_AMD_NB
21483+ amd_l3_attrs();
21484+#endif
21485+
21486 for (i = 0; i < num_cache_leaves; i++) {
21487+ struct kobj_type *ktype;
21488+
21489 this_object = INDEX_KOBJECT_PTR(cpu, i);
21490 this_object->cpu = cpu;
21491 this_object->index = i;
21492
21493 this_leaf = CPUID4_INFO_IDX(cpu, i);
21494
21495- ktype_cache.default_attrs = default_attrs;
21496+ ktype = &ktype_cache;
21497 #ifdef CONFIG_AMD_NB
21498 if (this_leaf->base.nb)
21499- ktype_cache.default_attrs = amd_l3_attrs();
21500+ ktype = &ktype_cache_amd_nb;
21501 #endif
21502 retval = kobject_init_and_add(&(this_object->kobj),
21503- &ktype_cache,
21504+ ktype,
21505 per_cpu(ici_cache_kobject, cpu),
21506 "index%1lu", i);
21507 if (unlikely(retval)) {
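
The intel_cacheinfo.c hunks above trade a runtime kzalloc() of the attribute table for a static default_attrs_amd_nb[] that carries spare NULL slots, so the optional L3 entries are appended in place and ktype_cache.default_attrs never has to be rewritten at runtime. A minimal sketch of that static-table-with-slack pattern (strings stand in for the kobject attribute pointers):

#include <stdio.h>

/* NULL-terminated table with reserved NULL slots: optional entries
 * can be filled in later without any allocation. */
static const char *base[] = { "type", "level", "size",
                              NULL, NULL, NULL };  /* 2 slack + terminator */

int main(void)
{
    int n = 3;                        /* index of first spare slot */
    base[n++] = "cache_disable_0";    /* optional feature present */
    base[n++] = "cache_disable_1";

    for (int i = 0; base[i]; i++)
        puts(base[i]);
    return 0;
}
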
21508diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21509index d2c6116..62fd7aa 100644
21510--- a/arch/x86/kernel/cpu/mcheck/mce.c
21511+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21512@@ -45,6 +45,7 @@
21513 #include <asm/processor.h>
21514 #include <asm/mce.h>
21515 #include <asm/msr.h>
21516+#include <asm/local.h>
21517
21518 #include "mce-internal.h"
21519
21520@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21521 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21522 m->cs, m->ip);
21523
21524- if (m->cs == __KERNEL_CS)
21525+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21526 print_symbol("{%s}", m->ip);
21527 pr_cont("\n");
21528 }
21529@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21530
21531 #define PANIC_TIMEOUT 5 /* 5 seconds */
21532
21533-static atomic_t mce_panicked;
21534+static atomic_unchecked_t mce_panicked;
21535
21536 static int fake_panic;
21537-static atomic_t mce_fake_panicked;
21538+static atomic_unchecked_t mce_fake_panicked;
21539
21540 /* Panic in progress. Enable interrupts and wait for final IPI */
21541 static void wait_for_panic(void)
21542@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21543 /*
21544 * Make sure only one CPU runs in machine check panic
21545 */
21546- if (atomic_inc_return(&mce_panicked) > 1)
21547+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21548 wait_for_panic();
21549 barrier();
21550
21551@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21552 console_verbose();
21553 } else {
21554 /* Don't log too much for fake panic */
21555- if (atomic_inc_return(&mce_fake_panicked) > 1)
21556+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21557 return;
21558 }
21559 /* First print corrected ones that are still unlogged */
21560@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21561 if (!fake_panic) {
21562 if (panic_timeout == 0)
21563 panic_timeout = mca_cfg.panic_timeout;
21564- panic(msg);
21565+ panic("%s", msg);
21566 } else
21567 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21568 }
21569@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21570 * might have been modified by someone else.
21571 */
21572 rmb();
21573- if (atomic_read(&mce_panicked))
21574+ if (atomic_read_unchecked(&mce_panicked))
21575 wait_for_panic();
21576 if (!mca_cfg.monarch_timeout)
21577 goto out;
21578@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21579 }
21580
21581 /* Call the installed machine check handler for this CPU setup. */
21582-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21583+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21584 unexpected_machine_check;
21585
21586 /*
21587@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21588 return;
21589 }
21590
21591+ pax_open_kernel();
21592 machine_check_vector = do_machine_check;
21593+ pax_close_kernel();
21594
21595 __mcheck_cpu_init_generic();
21596 __mcheck_cpu_init_vendor(c);
21597@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21598 */
21599
21600 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21601-static int mce_chrdev_open_count; /* #times opened */
21602+static local_t mce_chrdev_open_count; /* #times opened */
21603 static int mce_chrdev_open_exclu; /* already open exclusive? */
21604
21605 static int mce_chrdev_open(struct inode *inode, struct file *file)
21606@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21607 spin_lock(&mce_chrdev_state_lock);
21608
21609 if (mce_chrdev_open_exclu ||
21610- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21611+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21612 spin_unlock(&mce_chrdev_state_lock);
21613
21614 return -EBUSY;
21615@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21616
21617 if (file->f_flags & O_EXCL)
21618 mce_chrdev_open_exclu = 1;
21619- mce_chrdev_open_count++;
21620+ local_inc(&mce_chrdev_open_count);
21621
21622 spin_unlock(&mce_chrdev_state_lock);
21623
21624@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21625 {
21626 spin_lock(&mce_chrdev_state_lock);
21627
21628- mce_chrdev_open_count--;
21629+ local_dec(&mce_chrdev_open_count);
21630 mce_chrdev_open_exclu = 0;
21631
21632 spin_unlock(&mce_chrdev_state_lock);
21633@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21634
21635 for (i = 0; i < mca_cfg.banks; i++) {
21636 struct mce_bank *b = &mce_banks[i];
21637- struct device_attribute *a = &b->attr;
21638+ device_attribute_no_const *a = &b->attr;
21639
21640 sysfs_attr_init(&a->attr);
21641 a->attr.name = b->attrname;
21642@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21643 static void mce_reset(void)
21644 {
21645 cpu_missing = 0;
21646- atomic_set(&mce_fake_panicked, 0);
21647+ atomic_set_unchecked(&mce_fake_panicked, 0);
21648 atomic_set(&mce_executing, 0);
21649 atomic_set(&mce_callin, 0);
21650 atomic_set(&global_nwo, 0);
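
Two recurring patterns show up in the mce.c hunks. First, counters such as mce_panicked become atomic_unchecked_t, a PaX/grsecurity type (not a mainline API) that keeps plain wrapping semantics while ordinary atomic_t gains overflow trapping under the REFCOUNT feature; pure event counters, where wraparound is harmless, opt out. Second, the increment doubles as an election: only the caller that sees post-increment value 1 proceeds as the panic "monarch". A runnable C11 analogue of that gate:

#include <stdatomic.h>
#include <stdio.h>

/* fetch_add returns the prior value, so exactly one caller observes
 * 0 and wins; everyone after it would wait_for_panic() instead,
 * mirroring atomic_inc_return_unchecked(&mce_panicked) > 1 above. */
static atomic_int panicked;

static int try_become_monarch(void)
{
    return atomic_fetch_add(&panicked, 1) == 0;
}

int main(void)
{
    printf("%d\n", try_become_monarch());  /* 1: first caller wins */
    printf("%d\n", try_become_monarch());  /* 0: later callers must wait */
    return 0;
}
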
21651diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21652index a304298..49b6d06 100644
21653--- a/arch/x86/kernel/cpu/mcheck/p5.c
21654+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21655@@ -10,6 +10,7 @@
21656 #include <asm/processor.h>
21657 #include <asm/mce.h>
21658 #include <asm/msr.h>
21659+#include <asm/pgtable.h>
21660
21661 /* By default disabled */
21662 int mce_p5_enabled __read_mostly;
21663@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21664 if (!cpu_has(c, X86_FEATURE_MCE))
21665 return;
21666
21667+ pax_open_kernel();
21668 machine_check_vector = pentium_machine_check;
21669+ pax_close_kernel();
21670 /* Make sure the vector pointer is visible before we enable MCEs: */
21671 wmb();
21672
21673diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21674index 7dc5564..1273569 100644
21675--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21676+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21677@@ -9,6 +9,7 @@
21678 #include <asm/processor.h>
21679 #include <asm/mce.h>
21680 #include <asm/msr.h>
21681+#include <asm/pgtable.h>
21682
21683 /* Machine check handler for WinChip C6: */
21684 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21685@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21686 {
21687 u32 lo, hi;
21688
21689+ pax_open_kernel();
21690 machine_check_vector = winchip_machine_check;
21691+ pax_close_kernel();
21692 /* Make sure the vector pointer is visible before we enable MCEs: */
21693 wmb();
21694
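
p5.c and winchip.c repeat the bracketing already seen in mce.c: machine_check_vector is made __read_only, so the one legitimate store to it is wrapped in pax_open_kernel()/pax_close_kernel(), which, judging from the KERNEXEC code later in this patch, temporarily lift the write protection on otherwise read-only kernel data. A userspace analogue of that open/close window using mprotect() (the mapping and names are the sketch's own, not the kernel's):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void machine_check(void) { puts("machine check"); }

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    void (**vec)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if ((void *)vec == MAP_FAILED)
        return 1;

    *vec = machine_check;              /* boot-time initialization */
    mprotect(vec, pagesz, PROT_READ);  /* vector now effectively __read_only */

    mprotect(vec, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() analogue */
    *vec = machine_check;              /* the one sanctioned store */
    mprotect(vec, pagesz, PROT_READ);  /* pax_close_kernel() analogue */

    (*vec)();
    return 0;
}
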
21695diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21696index 36a8361..e7058c2 100644
21697--- a/arch/x86/kernel/cpu/microcode/core.c
21698+++ b/arch/x86/kernel/cpu/microcode/core.c
21699@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21700 return NOTIFY_OK;
21701 }
21702
21703-static struct notifier_block __refdata mc_cpu_notifier = {
21704+static struct notifier_block mc_cpu_notifier = {
21705 .notifier_call = mc_cpu_callback,
21706 };
21707
21708diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21709index c6826d1..8dc677e 100644
21710--- a/arch/x86/kernel/cpu/microcode/intel.c
21711+++ b/arch/x86/kernel/cpu/microcode/intel.c
21712@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21713 struct microcode_header_intel mc_header;
21714 unsigned int mc_size;
21715
21716+ if (leftover < sizeof(mc_header)) {
21717+ pr_err("error! Truncated header in microcode data file\n");
21718+ break;
21719+ }
21720+
21721 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21722 break;
21723
21724@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21725
21726 static int get_ucode_user(void *to, const void *from, size_t n)
21727 {
21728- return copy_from_user(to, from, n);
21729+ return copy_from_user(to, (const void __force_user *)from, n);
21730 }
21731
21732 static enum ucode_state
21733 request_microcode_user(int cpu, const void __user *buf, size_t size)
21734 {
21735- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21736+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21737 }
21738
21739 static void microcode_fini_cpu(int cpu)
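
The added leftover < sizeof(mc_header) guard is a classic incremental-parser bounds check: never copy a fixed-size header out of a buffer that has fewer bytes remaining than the header itself (the sibling hunk in intel_early.c below adds the same check plus an ARRAY_SIZE() bound on the destination index). A self-contained sketch of the loop shape, with illustrative names and a little-endian length field:

#include <stdio.h>
#include <string.h>

struct hdr { unsigned int totalsize; };  /* stand-in for microcode_header_intel */

static int parse(const unsigned char *p, size_t leftover)
{
    while (leftover) {
        struct hdr h;

        if (leftover < sizeof(h))         /* truncated trailing data */
            return -1;
        memcpy(&h, p, sizeof(h));
        if (h.totalsize < sizeof(h) || h.totalsize > leftover)
            return -1;                    /* bogus or oversized record */
        p += h.totalsize;
        leftover -= h.totalsize;
    }
    return 0;
}

int main(void)
{
    unsigned char buf[6] = { 4, 0, 0, 0, 0xff, 0xff }; /* one record + 2 junk bytes */
    printf("%d\n", parse(buf, sizeof(buf)));           /* -1: truncation caught */
    return 0;
}
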
21740diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21741index ec9df6f..420eb93 100644
21742--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21743+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21744@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21745 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21746 int i;
21747
21748- while (leftover) {
21749+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21750+
21751+ if (leftover < sizeof(mc_header))
21752+ break;
21753+
21754 mc_header = (struct microcode_header_intel *)ucode_ptr;
21755
21756 mc_size = get_totalsize(mc_header);
21757diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21758index ea5f363..cb0e905 100644
21759--- a/arch/x86/kernel/cpu/mtrr/main.c
21760+++ b/arch/x86/kernel/cpu/mtrr/main.c
21761@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21762 u64 size_or_mask, size_and_mask;
21763 static bool mtrr_aps_delayed_init;
21764
21765-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21766+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21767
21768 const struct mtrr_ops *mtrr_if;
21769
21770diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21771index df5e41f..816c719 100644
21772--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21773+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21774@@ -25,7 +25,7 @@ struct mtrr_ops {
21775 int (*validate_add_page)(unsigned long base, unsigned long size,
21776 unsigned int type);
21777 int (*have_wrcomb)(void);
21778-};
21779+} __do_const;
21780
21781 extern int generic_get_free_region(unsigned long base, unsigned long size,
21782 int replace_reg);
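
Tagging struct mtrr_ops with __do_const belongs to the PaX constification machinery: a GCC plugin makes ops-style structures (tables of function pointers) const so they land in .rodata, __do_const forces that for structs the plugin would otherwise skip, and conversely the attribute_group_no_const / device_attribute_no_const typedefs in the perf hunks below mark the few instances that genuinely must stay writable. A plain-C illustration of why a const ops table is worth having:

#include <stdio.h>

struct ops {
    int (*have_wrcomb)(void);  /* the kind of pointer an attacker retargets */
};

static int yes(void) { return 1; }

/* const => placed in .rodata: a stray runtime store faults, and a
 * direct store does not even compile. */
static const struct ops generic_ops = { .have_wrcomb = yes };

int main(void)
{
    /* generic_ops.have_wrcomb = NULL;  error: assignment of read-only member */
    printf("%d\n", generic_ops.have_wrcomb());
    return 0;
}
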
21783diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21784index 143e5f5..5825081 100644
21785--- a/arch/x86/kernel/cpu/perf_event.c
21786+++ b/arch/x86/kernel/cpu/perf_event.c
21787@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21788
21789 }
21790
21791-static struct attribute_group x86_pmu_format_group = {
21792+static attribute_group_no_const x86_pmu_format_group = {
21793 .name = "format",
21794 .attrs = NULL,
21795 };
21796@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21797 NULL,
21798 };
21799
21800-static struct attribute_group x86_pmu_events_group = {
21801+static attribute_group_no_const x86_pmu_events_group = {
21802 .name = "events",
21803 .attrs = events_attr,
21804 };
21805@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21806 if (idx > GDT_ENTRIES)
21807 return 0;
21808
21809- desc = raw_cpu_ptr(gdt_page.gdt);
21810+ desc = get_cpu_gdt_table(smp_processor_id());
21811 }
21812
21813 return get_desc_base(desc + idx);
21814@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21815 break;
21816
21817 perf_callchain_store(entry, frame.return_address);
21818- fp = frame.next_frame;
21819+ fp = (const void __force_user *)frame.next_frame;
21820 }
21821 }
21822
21823diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21824index 97242a9..cf9c30e 100644
21825--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21826+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21827@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21828 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21829 {
21830 struct attribute **attrs;
21831- struct attribute_group *attr_group;
21832+ attribute_group_no_const *attr_group;
21833 int i = 0, j;
21834
21835 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21836diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21837index 498b6d9..4126515 100644
21838--- a/arch/x86/kernel/cpu/perf_event_intel.c
21839+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21840@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21841 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21842
21843 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21844- u64 capabilities;
21845+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21846
21847- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21848- x86_pmu.intel_cap.capabilities = capabilities;
21849+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21850+ x86_pmu.intel_cap.capabilities = capabilities;
21851 }
21852
21853 intel_ds_init();
21854diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21855index c4bb8b8..9f7384d 100644
21856--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21857+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21858@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21859 NULL,
21860 };
21861
21862-static struct attribute_group rapl_pmu_events_group = {
21863+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21864 .name = "events",
21865 .attrs = NULL, /* patched at runtime */
21866 };
21867diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21868index c635b8b..b78835e 100644
21869--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21870+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21871@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21872 static int __init uncore_type_init(struct intel_uncore_type *type)
21873 {
21874 struct intel_uncore_pmu *pmus;
21875- struct attribute_group *attr_group;
21876+ attribute_group_no_const *attr_group;
21877 struct attribute **attrs;
21878 int i, j;
21879
21880diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21881index 6c8c1e7..515b98a 100644
21882--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21883+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21884@@ -114,7 +114,7 @@ struct intel_uncore_box {
21885 struct uncore_event_desc {
21886 struct kobj_attribute attr;
21887 const char *config;
21888-};
21889+} __do_const;
21890
21891 ssize_t uncore_event_show(struct kobject *kobj,
21892 struct kobj_attribute *attr, char *buf);
21893diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21894index 83741a7..bd3507d 100644
21895--- a/arch/x86/kernel/cpuid.c
21896+++ b/arch/x86/kernel/cpuid.c
21897@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21898 return notifier_from_errno(err);
21899 }
21900
21901-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21902+static struct notifier_block cpuid_class_cpu_notifier =
21903 {
21904 .notifier_call = cpuid_class_cpu_callback,
21905 };
21906diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21907index aceb2f9..c76d3e3 100644
21908--- a/arch/x86/kernel/crash.c
21909+++ b/arch/x86/kernel/crash.c
21910@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21911 #ifdef CONFIG_X86_32
21912 struct pt_regs fixed_regs;
21913
21914- if (!user_mode_vm(regs)) {
21915+ if (!user_mode(regs)) {
21916 crash_fixup_ss_esp(&fixed_regs, regs);
21917 regs = &fixed_regs;
21918 }
21919diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21920index afa64ad..dce67dd 100644
21921--- a/arch/x86/kernel/crash_dump_64.c
21922+++ b/arch/x86/kernel/crash_dump_64.c
21923@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21924 return -ENOMEM;
21925
21926 if (userbuf) {
21927- if (copy_to_user(buf, vaddr + offset, csize)) {
21928+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21929 iounmap(vaddr);
21930 return -EFAULT;
21931 }
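
The (char __force_user *) and (__force_kernel void *) casts recur throughout the patch: under PAX_MEMORY_UDEREF user and kernel pointers are treated as disjoint address spaces, so call sites that legitimately hand a kernel buffer to a user accessor (or vice versa) must say so explicitly, and the dumpstack_64.c hunk later replaces __copy_from_user() on a kernel address with probe_kernel_address() for the same reason. The sketch below assumes __force_user is shorthand for sparse's __force __user combination; outside of sparse the annotations compile away, so this builds and runs as ordinary C:

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* stub standing in for the kernel accessor */
static long copy_to_user(void __user *dst, const void *src, long n)
{
    memcpy((void __force *)dst, src, n);
    return 0;
}

static long dump_page(char *buf, const char *vaddr, long csize, int userbuf)
{
    if (userbuf)  /* buf really is __user: the cast is deliberate and sparse-visible */
        return copy_to_user((char __force __user *)buf, vaddr, csize);
    memcpy(buf, vaddr, csize);
    return 0;
}

int main(void)
{
    char out[8];
    dump_page(out, "oldmem", 7, 1);
    printf("%s\n", out);
    return 0;
}
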
21932diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21933index f6dfd93..892ade4 100644
21934--- a/arch/x86/kernel/doublefault.c
21935+++ b/arch/x86/kernel/doublefault.c
21936@@ -12,7 +12,7 @@
21937
21938 #define DOUBLEFAULT_STACKSIZE (1024)
21939 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21940-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21941+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21942
21943 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21944
21945@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21946 unsigned long gdt, tss;
21947
21948 native_store_gdt(&gdt_desc);
21949- gdt = gdt_desc.address;
21950+ gdt = (unsigned long)gdt_desc.address;
21951
21952 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21953
21954@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21955 /* 0x2 bit is always set */
21956 .flags = X86_EFLAGS_SF | 0x2,
21957 .sp = STACK_START,
21958- .es = __USER_DS,
21959+ .es = __KERNEL_DS,
21960 .cs = __KERNEL_CS,
21961 .ss = __KERNEL_DS,
21962- .ds = __USER_DS,
21963+ .ds = __KERNEL_DS,
21964 .fs = __KERNEL_PERCPU,
21965
21966 .__cr3 = __pa_nodebug(swapper_pg_dir),
21967diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21968index b74ebc7..2c95874 100644
21969--- a/arch/x86/kernel/dumpstack.c
21970+++ b/arch/x86/kernel/dumpstack.c
21971@@ -2,6 +2,9 @@
21972 * Copyright (C) 1991, 1992 Linus Torvalds
21973 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21974 */
21975+#ifdef CONFIG_GRKERNSEC_HIDESYM
21976+#define __INCLUDED_BY_HIDESYM 1
21977+#endif
21978 #include <linux/kallsyms.h>
21979 #include <linux/kprobes.h>
21980 #include <linux/uaccess.h>
21981@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21982
21983 void printk_address(unsigned long address)
21984 {
21985- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21986+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21987 }
21988
21989 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21990 static void
21991 print_ftrace_graph_addr(unsigned long addr, void *data,
21992 const struct stacktrace_ops *ops,
21993- struct thread_info *tinfo, int *graph)
21994+ struct task_struct *task, int *graph)
21995 {
21996- struct task_struct *task;
21997 unsigned long ret_addr;
21998 int index;
21999
22000 if (addr != (unsigned long)return_to_handler)
22001 return;
22002
22003- task = tinfo->task;
22004 index = task->curr_ret_stack;
22005
22006 if (!task->ret_stack || index < *graph)
22007@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22008 static inline void
22009 print_ftrace_graph_addr(unsigned long addr, void *data,
22010 const struct stacktrace_ops *ops,
22011- struct thread_info *tinfo, int *graph)
22012+ struct task_struct *task, int *graph)
22013 { }
22014 #endif
22015
22016@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22017 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22018 */
22019
22020-static inline int valid_stack_ptr(struct thread_info *tinfo,
22021- void *p, unsigned int size, void *end)
22022+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22023 {
22024- void *t = tinfo;
22025 if (end) {
22026 if (p < end && p >= (end-THREAD_SIZE))
22027 return 1;
22028@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22029 }
22030
22031 unsigned long
22032-print_context_stack(struct thread_info *tinfo,
22033+print_context_stack(struct task_struct *task, void *stack_start,
22034 unsigned long *stack, unsigned long bp,
22035 const struct stacktrace_ops *ops, void *data,
22036 unsigned long *end, int *graph)
22037 {
22038 struct stack_frame *frame = (struct stack_frame *)bp;
22039
22040- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22041+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22042 unsigned long addr;
22043
22044 addr = *stack;
22045@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22046 } else {
22047 ops->address(data, addr, 0);
22048 }
22049- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22050+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22051 }
22052 stack++;
22053 }
22054@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22055 EXPORT_SYMBOL_GPL(print_context_stack);
22056
22057 unsigned long
22058-print_context_stack_bp(struct thread_info *tinfo,
22059+print_context_stack_bp(struct task_struct *task, void *stack_start,
22060 unsigned long *stack, unsigned long bp,
22061 const struct stacktrace_ops *ops, void *data,
22062 unsigned long *end, int *graph)
22063@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22064 struct stack_frame *frame = (struct stack_frame *)bp;
22065 unsigned long *ret_addr = &frame->return_address;
22066
22067- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22068+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22069 unsigned long addr = *ret_addr;
22070
22071 if (!__kernel_text_address(addr))
22072@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22073 ops->address(data, addr, 1);
22074 frame = frame->next_frame;
22075 ret_addr = &frame->return_address;
22076- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22077+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22078 }
22079
22080 return (unsigned long)frame;
22081@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22082 static void print_trace_address(void *data, unsigned long addr, int reliable)
22083 {
22084 touch_nmi_watchdog();
22085- printk(data);
22086+ printk("%s", (char *)data);
22087 printk_stack_address(addr, reliable);
22088 }
22089
22090@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22091 EXPORT_SYMBOL_GPL(oops_begin);
22092 NOKPROBE_SYMBOL(oops_begin);
22093
22094+extern void gr_handle_kernel_exploit(void);
22095+
22096 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22097 {
22098 if (regs && kexec_should_crash(current))
22099@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22100 panic("Fatal exception in interrupt");
22101 if (panic_on_oops)
22102 panic("Fatal exception");
22103- do_exit(signr);
22104+
22105+ gr_handle_kernel_exploit();
22106+
22107+ do_group_exit(signr);
22108 }
22109 NOKPROBE_SYMBOL(oops_end);
22110
22111@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22112 print_modules();
22113 show_regs(regs);
22114 #ifdef CONFIG_X86_32
22115- if (user_mode_vm(regs)) {
22116+ if (user_mode(regs)) {
22117 sp = regs->sp;
22118 ss = regs->ss & 0xffff;
22119 } else {
22120@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22121 unsigned long flags = oops_begin();
22122 int sig = SIGSEGV;
22123
22124- if (!user_mode_vm(regs))
22125+ if (!user_mode(regs))
22126 report_bug(regs->ip, regs);
22127
22128 if (__die(str, regs, err))
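
Beyond the %pA/HIDESYM printing changes and the hardened oops path (oops_end() now calls grsecurity's gr_handle_kernel_exploit() hook and uses do_group_exit() so the whole thread group dies, not just the oopsing thread), the interesting refactor here is that the stack walkers stop carrying a struct thread_info * and instead take the owning task plus a stack_start pointer, reducing valid_stack_ptr() to a pure range check; the callers in dumpstack_32.c/dumpstack_64.c below derive stack_start by masking with ~(THREAD_SIZE-1). A runnable sketch of that bounds test (the THREAD_SIZE value is an assumption):

#include <stdio.h>

#define THREAD_SIZE 8192UL  /* assumption: order-1 kernel stacks */

/* An object of `size` bytes at p is on this stack only if it lies
 * entirely inside [start, start + THREAD_SIZE). */
static int valid_stack_ptr(void *start, void *p, unsigned int size)
{
    return (char *)p >= (char *)start &&
           (char *)p <= (char *)start + THREAD_SIZE - size;
}

int main(void)
{
    static char stack[THREAD_SIZE];
    printf("%d\n", valid_stack_ptr(stack, stack + 16, 8));              /* 1 */
    printf("%d\n", valid_stack_ptr(stack, stack + THREAD_SIZE - 4, 8)); /* 0 */
    return 0;
}
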
22129diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22130index 5abd4cd..c65733b 100644
22131--- a/arch/x86/kernel/dumpstack_32.c
22132+++ b/arch/x86/kernel/dumpstack_32.c
22133@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22134 bp = stack_frame(task, regs);
22135
22136 for (;;) {
22137- struct thread_info *context;
22138+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22139 void *end_stack;
22140
22141 end_stack = is_hardirq_stack(stack, cpu);
22142 if (!end_stack)
22143 end_stack = is_softirq_stack(stack, cpu);
22144
22145- context = task_thread_info(task);
22146- bp = ops->walk_stack(context, stack, bp, ops, data,
22147+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22148 end_stack, &graph);
22149
22150 /* Stop if not on irq stack */
22151@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22152 int i;
22153
22154 show_regs_print_info(KERN_EMERG);
22155- __show_regs(regs, !user_mode_vm(regs));
22156+ __show_regs(regs, !user_mode(regs));
22157
22158 /*
22159 * When in-kernel, we also print out the stack and code at the
22160 * time of the fault..
22161 */
22162- if (!user_mode_vm(regs)) {
22163+ if (!user_mode(regs)) {
22164 unsigned int code_prologue = code_bytes * 43 / 64;
22165 unsigned int code_len = code_bytes;
22166 unsigned char c;
22167 u8 *ip;
22168+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22169
22170 pr_emerg("Stack:\n");
22171 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22172
22173 pr_emerg("Code:");
22174
22175- ip = (u8 *)regs->ip - code_prologue;
22176+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22177 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22178 /* try starting at IP */
22179- ip = (u8 *)regs->ip;
22180+ ip = (u8 *)regs->ip + cs_base;
22181 code_len = code_len - code_prologue + 1;
22182 }
22183 for (i = 0; i < code_len; i++, ip++) {
22184@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22185 pr_cont(" Bad EIP value.");
22186 break;
22187 }
22188- if (ip == (u8 *)regs->ip)
22189+ if (ip == (u8 *)regs->ip + cs_base)
22190 pr_cont(" <%02x>", c);
22191 else
22192 pr_cont(" %02x", c);
22193@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22194 {
22195 unsigned short ud2;
22196
22197+ ip = ktla_ktva(ip);
22198 if (ip < PAGE_OFFSET)
22199 return 0;
22200 if (probe_kernel_address((unsigned short *)ip, ud2))
22201@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22202
22203 return ud2 == 0x0b0f;
22204 }
22205+
22206+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22207+void pax_check_alloca(unsigned long size)
22208+{
22209+ unsigned long sp = (unsigned long)&sp, stack_left;
22210+
22211+ /* all kernel stacks are of the same size */
22212+ stack_left = sp & (THREAD_SIZE - 1);
22213+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22214+}
22215+EXPORT_SYMBOL(pax_check_alloca);
22216+#endif
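
The 32-bit pax_check_alloca() leans on the fact that all kernel stacks here are THREAD_SIZE-sized and THREAD_SIZE-aligned, so the low bits of any in-stack address directly encode how much room remains below it; the 64-bit version in the next file has to try the process, IRQ and exception stacks in turn because their sizes differ. A runnable illustration of the masking trick (addresses and THREAD_SIZE are illustrative):

#include <stdio.h>

#define THREAD_SIZE 8192UL  /* assumption: THREAD_SIZE-aligned stacks */

int main(void)
{
    /* For an address inside an aligned stack, the offset from the
     * stack's low end is just the low log2(THREAD_SIZE) bits. */
    unsigned long sp = 0xc12340a0UL;
    unsigned long stack_left = sp & (THREAD_SIZE - 1);

    printf("bytes left below sp: %lu\n", stack_left);  /* 160 */
    return 0;
}
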
22217diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22218index ff86f19..73eabf4 100644
22219--- a/arch/x86/kernel/dumpstack_64.c
22220+++ b/arch/x86/kernel/dumpstack_64.c
22221@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22222 const struct stacktrace_ops *ops, void *data)
22223 {
22224 const unsigned cpu = get_cpu();
22225- struct thread_info *tinfo;
22226 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22227 unsigned long dummy;
22228 unsigned used = 0;
22229 int graph = 0;
22230 int done = 0;
22231+ void *stack_start;
22232
22233 if (!task)
22234 task = current;
22235@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22236 * current stack address. If the stacks consist of nested
22237 * exceptions
22238 */
22239- tinfo = task_thread_info(task);
22240 while (!done) {
22241 unsigned long *stack_end;
22242 enum stack_type stype;
22243@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22244 if (ops->stack(data, id) < 0)
22245 break;
22246
22247- bp = ops->walk_stack(tinfo, stack, bp, ops,
22248+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22249 data, stack_end, &graph);
22250 ops->stack(data, "<EOE>");
22251 /*
22252@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22253 * second-to-last pointer (index -2 to end) in the
22254 * exception stack:
22255 */
22256+ if ((u16)stack_end[-1] != __KERNEL_DS)
22257+ goto out;
22258 stack = (unsigned long *) stack_end[-2];
22259 done = 0;
22260 break;
22261@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22262
22263 if (ops->stack(data, "IRQ") < 0)
22264 break;
22265- bp = ops->walk_stack(tinfo, stack, bp,
22266+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22267 ops, data, stack_end, &graph);
22268 /*
22269 * We link to the next stack (which would be
22270@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22271 /*
22272 * This handles the process stack:
22273 */
22274- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22275+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22276+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22277+out:
22278 put_cpu();
22279 }
22280 EXPORT_SYMBOL(dump_trace);
22281@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22282 {
22283 unsigned short ud2;
22284
22285- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22286+ if (probe_kernel_address((unsigned short *)ip, ud2))
22287 return 0;
22288
22289 return ud2 == 0x0b0f;
22290 }
22291+
22292+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22293+void pax_check_alloca(unsigned long size)
22294+{
22295+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22296+ unsigned cpu, used;
22297+ char *id;
22298+
22299+ /* check the process stack first */
22300+ stack_start = (unsigned long)task_stack_page(current);
22301+ stack_end = stack_start + THREAD_SIZE;
22302+ if (likely(stack_start <= sp && sp < stack_end)) {
22303+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22304+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22305+ return;
22306+ }
22307+
22308+ cpu = get_cpu();
22309+
22310+ /* check the irq stacks */
22311+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22312+ stack_start = stack_end - IRQ_STACK_SIZE;
22313+ if (stack_start <= sp && sp < stack_end) {
22314+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22315+ put_cpu();
22316+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22317+ return;
22318+ }
22319+
22320+ /* check the exception stacks */
22321+ used = 0;
22322+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22323+ stack_start = stack_end - EXCEPTION_STKSZ;
22324+ if (stack_end && stack_start <= sp && sp < stack_end) {
22325+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22326+ put_cpu();
22327+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22328+ return;
22329+ }
22330+
22331+ put_cpu();
22332+
22333+ /* unknown stack */
22334+ BUG();
22335+}
22336+EXPORT_SYMBOL(pax_check_alloca);
22337+#endif
22338diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22339index dd2f07a..845dc05 100644
22340--- a/arch/x86/kernel/e820.c
22341+++ b/arch/x86/kernel/e820.c
22342@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22343
22344 static void early_panic(char *msg)
22345 {
22346- early_printk(msg);
22347- panic(msg);
22348+ early_printk("%s", msg);
22349+ panic("%s", msg);
22350 }
22351
22352 static int userdef __initdata;
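
early_panic() gets the same treatment as the panic()/printk() call sites elsewhere in the patch: a message that may contain untrusted text is never passed as the format string itself. A runnable demonstration of the hazard this closes (interpreting %x against nonexistent variadic arguments is undefined behavior):

#include <stdio.h>

int main(void)
{
    const char msg[] = "bad e820 map: %x %x";  /* attacker-influenced text */

    /* printf(msg);      unsafe: %x would read garbage arguments */
    printf("%s\n", msg); /* safe: msg is data, never a format string */
    return 0;
}
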
22353diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22354index 01d1c18..8073693 100644
22355--- a/arch/x86/kernel/early_printk.c
22356+++ b/arch/x86/kernel/early_printk.c
22357@@ -7,6 +7,7 @@
22358 #include <linux/pci_regs.h>
22359 #include <linux/pci_ids.h>
22360 #include <linux/errno.h>
22361+#include <linux/sched.h>
22362 #include <asm/io.h>
22363 #include <asm/processor.h>
22364 #include <asm/fcntl.h>
22365diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22366index 000d419..8f66802 100644
22367--- a/arch/x86/kernel/entry_32.S
22368+++ b/arch/x86/kernel/entry_32.S
22369@@ -177,13 +177,154 @@
22370 /*CFI_REL_OFFSET gs, PT_GS*/
22371 .endm
22372 .macro SET_KERNEL_GS reg
22373+
22374+#ifdef CONFIG_CC_STACKPROTECTOR
22375 movl $(__KERNEL_STACK_CANARY), \reg
22376+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22377+ movl $(__USER_DS), \reg
22378+#else
22379+ xorl \reg, \reg
22380+#endif
22381+
22382 movl \reg, %gs
22383 .endm
22384
22385 #endif /* CONFIG_X86_32_LAZY_GS */
22386
22387-.macro SAVE_ALL
22388+.macro pax_enter_kernel
22389+#ifdef CONFIG_PAX_KERNEXEC
22390+ call pax_enter_kernel
22391+#endif
22392+.endm
22393+
22394+.macro pax_exit_kernel
22395+#ifdef CONFIG_PAX_KERNEXEC
22396+ call pax_exit_kernel
22397+#endif
22398+.endm
22399+
22400+#ifdef CONFIG_PAX_KERNEXEC
22401+ENTRY(pax_enter_kernel)
22402+#ifdef CONFIG_PARAVIRT
22403+ pushl %eax
22404+ pushl %ecx
22405+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22406+ mov %eax, %esi
22407+#else
22408+ mov %cr0, %esi
22409+#endif
22410+ bts $16, %esi
22411+ jnc 1f
22412+ mov %cs, %esi
22413+ cmp $__KERNEL_CS, %esi
22414+ jz 3f
22415+ ljmp $__KERNEL_CS, $3f
22416+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22417+2:
22418+#ifdef CONFIG_PARAVIRT
22419+ mov %esi, %eax
22420+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22421+#else
22422+ mov %esi, %cr0
22423+#endif
22424+3:
22425+#ifdef CONFIG_PARAVIRT
22426+ popl %ecx
22427+ popl %eax
22428+#endif
22429+ ret
22430+ENDPROC(pax_enter_kernel)
22431+
22432+ENTRY(pax_exit_kernel)
22433+#ifdef CONFIG_PARAVIRT
22434+ pushl %eax
22435+ pushl %ecx
22436+#endif
22437+ mov %cs, %esi
22438+ cmp $__KERNEXEC_KERNEL_CS, %esi
22439+ jnz 2f
22440+#ifdef CONFIG_PARAVIRT
22441+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22442+ mov %eax, %esi
22443+#else
22444+ mov %cr0, %esi
22445+#endif
22446+ btr $16, %esi
22447+ ljmp $__KERNEL_CS, $1f
22448+1:
22449+#ifdef CONFIG_PARAVIRT
22450+ mov %esi, %eax
22451+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22452+#else
22453+ mov %esi, %cr0
22454+#endif
22455+2:
22456+#ifdef CONFIG_PARAVIRT
22457+ popl %ecx
22458+ popl %eax
22459+#endif
22460+ ret
22461+ENDPROC(pax_exit_kernel)
22462+#endif
22463+
22464+ .macro pax_erase_kstack
22465+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22466+ call pax_erase_kstack
22467+#endif
22468+ .endm
22469+
22470+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22471+/*
22472+ * ebp: thread_info
22473+ */
22474+ENTRY(pax_erase_kstack)
22475+ pushl %edi
22476+ pushl %ecx
22477+ pushl %eax
22478+
22479+ mov TI_lowest_stack(%ebp), %edi
22480+ mov $-0xBEEF, %eax
22481+ std
22482+
22483+1: mov %edi, %ecx
22484+ and $THREAD_SIZE_asm - 1, %ecx
22485+ shr $2, %ecx
22486+ repne scasl
22487+ jecxz 2f
22488+
22489+ cmp $2*16, %ecx
22490+ jc 2f
22491+
22492+ mov $2*16, %ecx
22493+ repe scasl
22494+ jecxz 2f
22495+ jne 1b
22496+
22497+2: cld
22498+ or $2*4, %edi
22499+ mov %esp, %ecx
22500+ sub %edi, %ecx
22501+
22502+ cmp $THREAD_SIZE_asm, %ecx
22503+ jb 3f
22504+ ud2
22505+3:
22506+
22507+ shr $2, %ecx
22508+ rep stosl
22509+
22510+ mov TI_task_thread_sp0(%ebp), %edi
22511+ sub $128, %edi
22512+ mov %edi, TI_lowest_stack(%ebp)
22513+
22514+ popl %eax
22515+ popl %ecx
22516+ popl %edi
22517+ ret
22518+ENDPROC(pax_erase_kstack)
22519+#endif
22520+
22521+.macro __SAVE_ALL _DS
22522 cld
22523 PUSH_GS
22524 pushl_cfi %fs
22525@@ -206,7 +347,7 @@
22526 CFI_REL_OFFSET ecx, 0
22527 pushl_cfi %ebx
22528 CFI_REL_OFFSET ebx, 0
22529- movl $(__USER_DS), %edx
22530+ movl $\_DS, %edx
22531 movl %edx, %ds
22532 movl %edx, %es
22533 movl $(__KERNEL_PERCPU), %edx
22534@@ -214,6 +355,15 @@
22535 SET_KERNEL_GS %edx
22536 .endm
22537
22538+.macro SAVE_ALL
22539+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22540+ __SAVE_ALL __KERNEL_DS
22541+ pax_enter_kernel
22542+#else
22543+ __SAVE_ALL __USER_DS
22544+#endif
22545+.endm
22546+
22547 .macro RESTORE_INT_REGS
22548 popl_cfi %ebx
22549 CFI_RESTORE ebx
22550@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22551 popfl_cfi
22552 jmp syscall_exit
22553 CFI_ENDPROC
22554-END(ret_from_fork)
22555+ENDPROC(ret_from_fork)
22556
22557 ENTRY(ret_from_kernel_thread)
22558 CFI_STARTPROC
22559@@ -340,7 +490,15 @@ ret_from_intr:
22560 andl $SEGMENT_RPL_MASK, %eax
22561 #endif
22562 cmpl $USER_RPL, %eax
22563+
22564+#ifdef CONFIG_PAX_KERNEXEC
22565+ jae resume_userspace
22566+
22567+ pax_exit_kernel
22568+ jmp resume_kernel
22569+#else
22570 jb resume_kernel # not returning to v8086 or userspace
22571+#endif
22572
22573 ENTRY(resume_userspace)
22574 LOCKDEP_SYS_EXIT
22575@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22576 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22577 # int/exception return?
22578 jne work_pending
22579- jmp restore_all
22580-END(ret_from_exception)
22581+ jmp restore_all_pax
22582+ENDPROC(ret_from_exception)
22583
22584 #ifdef CONFIG_PREEMPT
22585 ENTRY(resume_kernel)
22586@@ -365,7 +523,7 @@ need_resched:
22587 jz restore_all
22588 call preempt_schedule_irq
22589 jmp need_resched
22590-END(resume_kernel)
22591+ENDPROC(resume_kernel)
22592 #endif
22593 CFI_ENDPROC
22594
22595@@ -395,30 +553,45 @@ sysenter_past_esp:
22596 /*CFI_REL_OFFSET cs, 0*/
22597 /*
22598 * Push current_thread_info()->sysenter_return to the stack.
22599- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22600- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22601 */
22602- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22603+ pushl_cfi $0
22604 CFI_REL_OFFSET eip, 0
22605
22606 pushl_cfi %eax
22607 SAVE_ALL
22608+ GET_THREAD_INFO(%ebp)
22609+ movl TI_sysenter_return(%ebp),%ebp
22610+ movl %ebp,PT_EIP(%esp)
22611 ENABLE_INTERRUPTS(CLBR_NONE)
22612
22613 /*
22614 * Load the potential sixth argument from user stack.
22615 * Careful about security.
22616 */
22617+ movl PT_OLDESP(%esp),%ebp
22618+
22619+#ifdef CONFIG_PAX_MEMORY_UDEREF
22620+ mov PT_OLDSS(%esp),%ds
22621+1: movl %ds:(%ebp),%ebp
22622+ push %ss
22623+ pop %ds
22624+#else
22625 cmpl $__PAGE_OFFSET-3,%ebp
22626 jae syscall_fault
22627 ASM_STAC
22628 1: movl (%ebp),%ebp
22629 ASM_CLAC
22630+#endif
22631+
22632 movl %ebp,PT_EBP(%esp)
22633 _ASM_EXTABLE(1b,syscall_fault)
22634
22635 GET_THREAD_INFO(%ebp)
22636
22637+#ifdef CONFIG_PAX_RANDKSTACK
22638+ pax_erase_kstack
22639+#endif
22640+
22641 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22642 jnz sysenter_audit
22643 sysenter_do_call:
22644@@ -434,12 +607,24 @@ sysenter_after_call:
22645 testl $_TIF_ALLWORK_MASK, %ecx
22646 jne sysexit_audit
22647 sysenter_exit:
22648+
22649+#ifdef CONFIG_PAX_RANDKSTACK
22650+ pushl_cfi %eax
22651+ movl %esp, %eax
22652+ call pax_randomize_kstack
22653+ popl_cfi %eax
22654+#endif
22655+
22656+ pax_erase_kstack
22657+
22658 /* if something modifies registers it must also disable sysexit */
22659 movl PT_EIP(%esp), %edx
22660 movl PT_OLDESP(%esp), %ecx
22661 xorl %ebp,%ebp
22662 TRACE_IRQS_ON
22663 1: mov PT_FS(%esp), %fs
22664+2: mov PT_DS(%esp), %ds
22665+3: mov PT_ES(%esp), %es
22666 PTGS_TO_GS
22667 ENABLE_INTERRUPTS_SYSEXIT
22668
22669@@ -453,6 +638,9 @@ sysenter_audit:
22670 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22671 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22672 call __audit_syscall_entry
22673+
22674+ pax_erase_kstack
22675+
22676 popl_cfi %ecx /* get that remapped edx off the stack */
22677 popl_cfi %ecx /* get that remapped esi off the stack */
22678 movl PT_EAX(%esp),%eax /* reload syscall number */
22679@@ -479,10 +667,16 @@ sysexit_audit:
22680
22681 CFI_ENDPROC
22682 .pushsection .fixup,"ax"
22683-2: movl $0,PT_FS(%esp)
22684+4: movl $0,PT_FS(%esp)
22685+ jmp 1b
22686+5: movl $0,PT_DS(%esp)
22687+ jmp 1b
22688+6: movl $0,PT_ES(%esp)
22689 jmp 1b
22690 .popsection
22691- _ASM_EXTABLE(1b,2b)
22692+ _ASM_EXTABLE(1b,4b)
22693+ _ASM_EXTABLE(2b,5b)
22694+ _ASM_EXTABLE(3b,6b)
22695 PTGS_TO_GS_EX
22696 ENDPROC(ia32_sysenter_target)
22697
22698@@ -493,6 +687,11 @@ ENTRY(system_call)
22699 pushl_cfi %eax # save orig_eax
22700 SAVE_ALL
22701 GET_THREAD_INFO(%ebp)
22702+
22703+#ifdef CONFIG_PAX_RANDKSTACK
22704+ pax_erase_kstack
22705+#endif
22706+
22707 # system call tracing in operation / emulation
22708 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22709 jnz syscall_trace_entry
22710@@ -512,6 +711,15 @@ syscall_exit:
22711 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22712 jne syscall_exit_work
22713
22714+restore_all_pax:
22715+
22716+#ifdef CONFIG_PAX_RANDKSTACK
22717+ movl %esp, %eax
22718+ call pax_randomize_kstack
22719+#endif
22720+
22721+ pax_erase_kstack
22722+
22723 restore_all:
22724 TRACE_IRQS_IRET
22725 restore_all_notrace:
22726@@ -566,14 +774,34 @@ ldt_ss:
22727 * compensating for the offset by changing to the ESPFIX segment with
22728 * a base address that matches for the difference.
22729 */
22730-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22731+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22732 mov %esp, %edx /* load kernel esp */
22733 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22734 mov %dx, %ax /* eax: new kernel esp */
22735 sub %eax, %edx /* offset (low word is 0) */
22736+#ifdef CONFIG_SMP
22737+ movl PER_CPU_VAR(cpu_number), %ebx
22738+ shll $PAGE_SHIFT_asm, %ebx
22739+ addl $cpu_gdt_table, %ebx
22740+#else
22741+ movl $cpu_gdt_table, %ebx
22742+#endif
22743 shr $16, %edx
22744- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22745- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22746+
22747+#ifdef CONFIG_PAX_KERNEXEC
22748+ mov %cr0, %esi
22749+ btr $16, %esi
22750+ mov %esi, %cr0
22751+#endif
22752+
22753+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22754+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22755+
22756+#ifdef CONFIG_PAX_KERNEXEC
22757+ bts $16, %esi
22758+ mov %esi, %cr0
22759+#endif
22760+
22761 pushl_cfi $__ESPFIX_SS
22762 pushl_cfi %eax /* new kernel esp */
22763 /* Disable interrupts, but do not irqtrace this section: we
22764@@ -603,20 +831,18 @@ work_resched:
22765 movl TI_flags(%ebp), %ecx
22766 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22767 # than syscall tracing?
22768- jz restore_all
22769+ jz restore_all_pax
22770 testb $_TIF_NEED_RESCHED, %cl
22771 jnz work_resched
22772
22773 work_notifysig: # deal with pending signals and
22774 # notify-resume requests
22775+ movl %esp, %eax
22776 #ifdef CONFIG_VM86
22777 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22778- movl %esp, %eax
22779 jne work_notifysig_v86 # returning to kernel-space or
22780 # vm86-space
22781 1:
22782-#else
22783- movl %esp, %eax
22784 #endif
22785 TRACE_IRQS_ON
22786 ENABLE_INTERRUPTS(CLBR_NONE)
22787@@ -637,7 +863,7 @@ work_notifysig_v86:
22788 movl %eax, %esp
22789 jmp 1b
22790 #endif
22791-END(work_pending)
22792+ENDPROC(work_pending)
22793
22794 # perform syscall exit tracing
22795 ALIGN
22796@@ -645,11 +871,14 @@ syscall_trace_entry:
22797 movl $-ENOSYS,PT_EAX(%esp)
22798 movl %esp, %eax
22799 call syscall_trace_enter
22800+
22801+ pax_erase_kstack
22802+
22803 /* What it returned is what we'll actually use. */
22804 cmpl $(NR_syscalls), %eax
22805 jnae syscall_call
22806 jmp syscall_exit
22807-END(syscall_trace_entry)
22808+ENDPROC(syscall_trace_entry)
22809
22810 # perform syscall exit tracing
22811 ALIGN
22812@@ -662,26 +891,30 @@ syscall_exit_work:
22813 movl %esp, %eax
22814 call syscall_trace_leave
22815 jmp resume_userspace
22816-END(syscall_exit_work)
22817+ENDPROC(syscall_exit_work)
22818 CFI_ENDPROC
22819
22820 RING0_INT_FRAME # can't unwind into user space anyway
22821 syscall_fault:
22822+#ifdef CONFIG_PAX_MEMORY_UDEREF
22823+ push %ss
22824+ pop %ds
22825+#endif
22826 ASM_CLAC
22827 GET_THREAD_INFO(%ebp)
22828 movl $-EFAULT,PT_EAX(%esp)
22829 jmp resume_userspace
22830-END(syscall_fault)
22831+ENDPROC(syscall_fault)
22832
22833 syscall_badsys:
22834 movl $-ENOSYS,%eax
22835 jmp syscall_after_call
22836-END(syscall_badsys)
22837+ENDPROC(syscall_badsys)
22838
22839 sysenter_badsys:
22840 movl $-ENOSYS,%eax
22841 jmp sysenter_after_call
22842-END(sysenter_badsys)
22843+ENDPROC(sysenter_badsys)
22844 CFI_ENDPROC
22845
22846 .macro FIXUP_ESPFIX_STACK
22847@@ -694,8 +927,15 @@ END(sysenter_badsys)
22848 */
22849 #ifdef CONFIG_X86_ESPFIX32
22850 /* fixup the stack */
22851- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22852- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22853+#ifdef CONFIG_SMP
22854+ movl PER_CPU_VAR(cpu_number), %ebx
22855+ shll $PAGE_SHIFT_asm, %ebx
22856+ addl $cpu_gdt_table, %ebx
22857+#else
22858+ movl $cpu_gdt_table, %ebx
22859+#endif
22860+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22861+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22862 shl $16, %eax
22863 addl %esp, %eax /* the adjusted stack pointer */
22864 pushl_cfi $__KERNEL_DS
22865@@ -751,7 +991,7 @@ vector=vector+1
22866 .endr
22867 2: jmp common_interrupt
22868 .endr
22869-END(irq_entries_start)
22870+ENDPROC(irq_entries_start)
22871
22872 .previous
22873 END(interrupt)
22874@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22875 pushl_cfi $do_coprocessor_error
22876 jmp error_code
22877 CFI_ENDPROC
22878-END(coprocessor_error)
22879+ENDPROC(coprocessor_error)
22880
22881 ENTRY(simd_coprocessor_error)
22882 RING0_INT_FRAME
22883@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22884 .section .altinstructions,"a"
22885 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22886 .previous
22887-.section .altinstr_replacement,"ax"
22888+.section .altinstr_replacement,"a"
22889 663: pushl $do_simd_coprocessor_error
22890 664:
22891 .previous
22892@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22893 #endif
22894 jmp error_code
22895 CFI_ENDPROC
22896-END(simd_coprocessor_error)
22897+ENDPROC(simd_coprocessor_error)
22898
22899 ENTRY(device_not_available)
22900 RING0_INT_FRAME
22901@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22902 pushl_cfi $do_device_not_available
22903 jmp error_code
22904 CFI_ENDPROC
22905-END(device_not_available)
22906+ENDPROC(device_not_available)
22907
22908 #ifdef CONFIG_PARAVIRT
22909 ENTRY(native_iret)
22910 iret
22911 _ASM_EXTABLE(native_iret, iret_exc)
22912-END(native_iret)
22913+ENDPROC(native_iret)
22914
22915 ENTRY(native_irq_enable_sysexit)
22916 sti
22917 sysexit
22918-END(native_irq_enable_sysexit)
22919+ENDPROC(native_irq_enable_sysexit)
22920 #endif
22921
22922 ENTRY(overflow)
22923@@ -860,7 +1100,7 @@ ENTRY(overflow)
22924 pushl_cfi $do_overflow
22925 jmp error_code
22926 CFI_ENDPROC
22927-END(overflow)
22928+ENDPROC(overflow)
22929
22930 ENTRY(bounds)
22931 RING0_INT_FRAME
22932@@ -869,7 +1109,7 @@ ENTRY(bounds)
22933 pushl_cfi $do_bounds
22934 jmp error_code
22935 CFI_ENDPROC
22936-END(bounds)
22937+ENDPROC(bounds)
22938
22939 ENTRY(invalid_op)
22940 RING0_INT_FRAME
22941@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22942 pushl_cfi $do_invalid_op
22943 jmp error_code
22944 CFI_ENDPROC
22945-END(invalid_op)
22946+ENDPROC(invalid_op)
22947
22948 ENTRY(coprocessor_segment_overrun)
22949 RING0_INT_FRAME
22950@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22951 pushl_cfi $do_coprocessor_segment_overrun
22952 jmp error_code
22953 CFI_ENDPROC
22954-END(coprocessor_segment_overrun)
22955+ENDPROC(coprocessor_segment_overrun)
22956
22957 ENTRY(invalid_TSS)
22958 RING0_EC_FRAME
22959@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22960 pushl_cfi $do_invalid_TSS
22961 jmp error_code
22962 CFI_ENDPROC
22963-END(invalid_TSS)
22964+ENDPROC(invalid_TSS)
22965
22966 ENTRY(segment_not_present)
22967 RING0_EC_FRAME
22968@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22969 pushl_cfi $do_segment_not_present
22970 jmp error_code
22971 CFI_ENDPROC
22972-END(segment_not_present)
22973+ENDPROC(segment_not_present)
22974
22975 ENTRY(stack_segment)
22976 RING0_EC_FRAME
22977@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22978 pushl_cfi $do_stack_segment
22979 jmp error_code
22980 CFI_ENDPROC
22981-END(stack_segment)
22982+ENDPROC(stack_segment)
22983
22984 ENTRY(alignment_check)
22985 RING0_EC_FRAME
22986@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22987 pushl_cfi $do_alignment_check
22988 jmp error_code
22989 CFI_ENDPROC
22990-END(alignment_check)
22991+ENDPROC(alignment_check)
22992
22993 ENTRY(divide_error)
22994 RING0_INT_FRAME
22995@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22996 pushl_cfi $do_divide_error
22997 jmp error_code
22998 CFI_ENDPROC
22999-END(divide_error)
23000+ENDPROC(divide_error)
23001
23002 #ifdef CONFIG_X86_MCE
23003 ENTRY(machine_check)
23004@@ -938,7 +1178,7 @@ ENTRY(machine_check)
23005 pushl_cfi machine_check_vector
23006 jmp error_code
23007 CFI_ENDPROC
23008-END(machine_check)
23009+ENDPROC(machine_check)
23010 #endif
23011
23012 ENTRY(spurious_interrupt_bug)
23013@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
23014 pushl_cfi $do_spurious_interrupt_bug
23015 jmp error_code
23016 CFI_ENDPROC
23017-END(spurious_interrupt_bug)
23018+ENDPROC(spurious_interrupt_bug)
23019
23020 #ifdef CONFIG_XEN
23021 /* Xen doesn't set %esp to be precisely what the normal sysenter
23022@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23023
23024 ENTRY(mcount)
23025 ret
23026-END(mcount)
23027+ENDPROC(mcount)
23028
23029 ENTRY(ftrace_caller)
23030 pushl %eax
23031@@ -1084,7 +1324,7 @@ ftrace_graph_call:
23032 .globl ftrace_stub
23033 ftrace_stub:
23034 ret
23035-END(ftrace_caller)
23036+ENDPROC(ftrace_caller)
23037
23038 ENTRY(ftrace_regs_caller)
23039 pushf /* push flags before compare (in cs location) */
23040@@ -1182,7 +1422,7 @@ trace:
23041 popl %ecx
23042 popl %eax
23043 jmp ftrace_stub
23044-END(mcount)
23045+ENDPROC(mcount)
23046 #endif /* CONFIG_DYNAMIC_FTRACE */
23047 #endif /* CONFIG_FUNCTION_TRACER */
23048
23049@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
23050 popl %ecx
23051 popl %eax
23052 ret
23053-END(ftrace_graph_caller)
23054+ENDPROC(ftrace_graph_caller)
23055
23056 .globl return_to_handler
23057 return_to_handler:
23058@@ -1261,15 +1501,18 @@ error_code:
23059 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23060 REG_TO_PTGS %ecx
23061 SET_KERNEL_GS %ecx
23062- movl $(__USER_DS), %ecx
23063+ movl $(__KERNEL_DS), %ecx
23064 movl %ecx, %ds
23065 movl %ecx, %es
23066+
23067+ pax_enter_kernel
23068+
23069 TRACE_IRQS_OFF
23070 movl %esp,%eax # pt_regs pointer
23071 call *%edi
23072 jmp ret_from_exception
23073 CFI_ENDPROC
23074-END(page_fault)
23075+ENDPROC(page_fault)
23076
23077 /*
23078 * Debug traps and NMI can happen at the one SYSENTER instruction
23079@@ -1312,7 +1555,7 @@ debug_stack_correct:
23080 call do_debug
23081 jmp ret_from_exception
23082 CFI_ENDPROC
23083-END(debug)
23084+ENDPROC(debug)
23085
23086 /*
23087 * NMI is doubly nasty. It can happen _while_ we're handling
23088@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23089 xorl %edx,%edx # zero error code
23090 movl %esp,%eax # pt_regs pointer
23091 call do_nmi
23092+
23093+ pax_exit_kernel
23094+
23095 jmp restore_all_notrace
23096 CFI_ENDPROC
23097
23098@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23099 FIXUP_ESPFIX_STACK # %eax == %esp
23100 xorl %edx,%edx # zero error code
23101 call do_nmi
23102+
23103+ pax_exit_kernel
23104+
23105 RESTORE_REGS
23106 lss 12+4(%esp), %esp # back to espfix stack
23107 CFI_ADJUST_CFA_OFFSET -24
23108 jmp irq_return
23109 #endif
23110 CFI_ENDPROC
23111-END(nmi)
23112+ENDPROC(nmi)
23113
23114 ENTRY(int3)
23115 RING0_INT_FRAME
23116@@ -1408,14 +1657,14 @@ ENTRY(int3)
23117 call do_int3
23118 jmp ret_from_exception
23119 CFI_ENDPROC
23120-END(int3)
23121+ENDPROC(int3)
23122
23123 ENTRY(general_protection)
23124 RING0_EC_FRAME
23125 pushl_cfi $do_general_protection
23126 jmp error_code
23127 CFI_ENDPROC
23128-END(general_protection)
23129+ENDPROC(general_protection)
23130
23131 #ifdef CONFIG_KVM_GUEST
23132 ENTRY(async_page_fault)
23133@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23134 pushl_cfi $do_async_page_fault
23135 jmp error_code
23136 CFI_ENDPROC
23137-END(async_page_fault)
23138+ENDPROC(async_page_fault)
23139 #endif
23140
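
Most of the entry_32.S churn is PaX instrumentation: pax_enter_kernel/pax_exit_kernel set and clear CR0 bit 16 (write protect) and switch between __KERNEL_CS and __KERNEXEC_KERNEL_CS at ring transitions, and pax_erase_kstack implements the STACKLEAK erase, scanning down from the recorded lowest_stack for a long run of poison words and then refilling everything up to the live stack pointer with -0xBEEF. A C analogue of the fill step (sizes and the driver are the sketch's own):

#include <stdio.h>

#define POISON 0xffff4111u  /* (unsigned)-0xBEEF, the value the asm stores */

/* Everything between the deepest point the kernel stack reached and
 * the current stack pointer is overwritten, so stale data can neither
 * leak to userspace nor seed a later stack spray. */
static void erase_kstack(unsigned int *lowest, unsigned int *sp)
{
    while (lowest < sp)
        *lowest++ = POISON;
}

int main(void)
{
    unsigned int stack[16] = { 0 };
    erase_kstack(stack, stack + 16);
    printf("%#x\n", stack[0]);  /* 0xffff4111 */
    return 0;
}
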
23141diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23142index 4ee9a23..c786610 100644
23143--- a/arch/x86/kernel/entry_64.S
23144+++ b/arch/x86/kernel/entry_64.S
23145@@ -59,6 +59,8 @@
23146 #include <asm/smap.h>
23147 #include <asm/pgtable_types.h>
23148 #include <linux/err.h>
23149+#include <asm/pgtable.h>
23150+#include <asm/alternative-asm.h>
23151
23152 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23153 #include <linux/elf-em.h>
23154@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23155 ENDPROC(native_usergs_sysret64)
23156 #endif /* CONFIG_PARAVIRT */
23157
23158+ .macro ljmpq sel, off
23159+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23160+ .byte 0x48; ljmp *1234f(%rip)
23161+ .pushsection .rodata
23162+ .align 16
23163+ 1234: .quad \off; .word \sel
23164+ .popsection
23165+#else
23166+ pushq $\sel
23167+ pushq $\off
23168+ lretq
23169+#endif
23170+ .endm
23171+
23172+ .macro pax_enter_kernel
23173+ pax_set_fptr_mask
23174+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23175+ call pax_enter_kernel
23176+#endif
23177+ .endm
23178+
23179+ .macro pax_exit_kernel
23180+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23181+ call pax_exit_kernel
23182+#endif
23183+
23184+ .endm
23185+
23186+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23187+ENTRY(pax_enter_kernel)
23188+ pushq %rdi
23189+
23190+#ifdef CONFIG_PARAVIRT
23191+ PV_SAVE_REGS(CLBR_RDI)
23192+#endif
23193+
23194+#ifdef CONFIG_PAX_KERNEXEC
23195+ GET_CR0_INTO_RDI
23196+ bts $16,%rdi
23197+ jnc 3f
23198+ mov %cs,%edi
23199+ cmp $__KERNEL_CS,%edi
23200+ jnz 2f
23201+1:
23202+#endif
23203+
23204+#ifdef CONFIG_PAX_MEMORY_UDEREF
23205+ 661: jmp 111f
23206+ .pushsection .altinstr_replacement, "a"
23207+ 662: ASM_NOP2
23208+ .popsection
23209+ .pushsection .altinstructions, "a"
23210+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23211+ .popsection
23212+ GET_CR3_INTO_RDI
23213+ cmp $0,%dil
23214+ jnz 112f
23215+ mov $__KERNEL_DS,%edi
23216+ mov %edi,%ss
23217+ jmp 111f
23218+112: cmp $1,%dil
23219+ jz 113f
23220+ ud2
23221+113: sub $4097,%rdi
23222+ bts $63,%rdi
23223+ SET_RDI_INTO_CR3
23224+ mov $__UDEREF_KERNEL_DS,%edi
23225+ mov %edi,%ss
23226+111:
23227+#endif
23228+
23229+#ifdef CONFIG_PARAVIRT
23230+ PV_RESTORE_REGS(CLBR_RDI)
23231+#endif
23232+
23233+ popq %rdi
23234+ pax_force_retaddr
23235+ retq
23236+
23237+#ifdef CONFIG_PAX_KERNEXEC
23238+2: ljmpq __KERNEL_CS,1b
23239+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23240+4: SET_RDI_INTO_CR0
23241+ jmp 1b
23242+#endif
23243+ENDPROC(pax_enter_kernel)
23244+
23245+ENTRY(pax_exit_kernel)
23246+ pushq %rdi
23247+
23248+#ifdef CONFIG_PARAVIRT
23249+ PV_SAVE_REGS(CLBR_RDI)
23250+#endif
23251+
23252+#ifdef CONFIG_PAX_KERNEXEC
23253+ mov %cs,%rdi
23254+ cmp $__KERNEXEC_KERNEL_CS,%edi
23255+ jz 2f
23256+ GET_CR0_INTO_RDI
23257+ bts $16,%rdi
23258+ jnc 4f
23259+1:
23260+#endif
23261+
23262+#ifdef CONFIG_PAX_MEMORY_UDEREF
23263+ 661: jmp 111f
23264+ .pushsection .altinstr_replacement, "a"
23265+ 662: ASM_NOP2
23266+ .popsection
23267+ .pushsection .altinstructions, "a"
23268+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23269+ .popsection
23270+ mov %ss,%edi
23271+ cmp $__UDEREF_KERNEL_DS,%edi
23272+ jnz 111f
23273+ GET_CR3_INTO_RDI
23274+ cmp $0,%dil
23275+ jz 112f
23276+ ud2
23277+112: add $4097,%rdi
23278+ bts $63,%rdi
23279+ SET_RDI_INTO_CR3
23280+ mov $__KERNEL_DS,%edi
23281+ mov %edi,%ss
23282+111:
23283+#endif
23284+
23285+#ifdef CONFIG_PARAVIRT
23286+ PV_RESTORE_REGS(CLBR_RDI);
23287+#endif
23288+
23289+ popq %rdi
23290+ pax_force_retaddr
23291+ retq
23292+
23293+#ifdef CONFIG_PAX_KERNEXEC
23294+2: GET_CR0_INTO_RDI
23295+ btr $16,%rdi
23296+ jnc 4f
23297+ ljmpq __KERNEL_CS,3f
23298+3: SET_RDI_INTO_CR0
23299+ jmp 1b
23300+4: ud2
23301+ jmp 4b
23302+#endif
23303+ENDPROC(pax_exit_kernel)
23304+#endif
23305+
23306+ .macro pax_enter_kernel_user
23307+ pax_set_fptr_mask
23308+#ifdef CONFIG_PAX_MEMORY_UDEREF
23309+ call pax_enter_kernel_user
23310+#endif
23311+ .endm
23312+
23313+ .macro pax_exit_kernel_user
23314+#ifdef CONFIG_PAX_MEMORY_UDEREF
23315+ call pax_exit_kernel_user
23316+#endif
23317+#ifdef CONFIG_PAX_RANDKSTACK
23318+ pushq %rax
23319+ pushq %r11
23320+ call pax_randomize_kstack
23321+ popq %r11
23322+ popq %rax
23323+#endif
23324+ .endm
23325+
23326+#ifdef CONFIG_PAX_MEMORY_UDEREF
23327+ENTRY(pax_enter_kernel_user)
23328+ pushq %rdi
23329+ pushq %rbx
23330+
23331+#ifdef CONFIG_PARAVIRT
23332+ PV_SAVE_REGS(CLBR_RDI)
23333+#endif
23334+
23335+ 661: jmp 111f
23336+ .pushsection .altinstr_replacement, "a"
23337+ 662: ASM_NOP2
23338+ .popsection
23339+ .pushsection .altinstructions, "a"
23340+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23341+ .popsection
23342+ GET_CR3_INTO_RDI
23343+ cmp $1,%dil
23344+ jnz 4f
23345+ sub $4097,%rdi
23346+ bts $63,%rdi
23347+ SET_RDI_INTO_CR3
23348+ jmp 3f
23349+111:
23350+
23351+ GET_CR3_INTO_RDI
23352+ mov %rdi,%rbx
23353+ add $__START_KERNEL_map,%rbx
23354+ sub phys_base(%rip),%rbx
23355+
23356+#ifdef CONFIG_PARAVIRT
23357+ cmpl $0, pv_info+PARAVIRT_enabled
23358+ jz 1f
23359+ pushq %rdi
23360+ i = 0
23361+ .rept USER_PGD_PTRS
23362+ mov i*8(%rbx),%rsi
23363+ mov $0,%sil
23364+ lea i*8(%rbx),%rdi
23365+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23366+ i = i + 1
23367+ .endr
23368+ popq %rdi
23369+ jmp 2f
23370+1:
23371+#endif
23372+
23373+ i = 0
23374+ .rept USER_PGD_PTRS
23375+ movb $0,i*8(%rbx)
23376+ i = i + 1
23377+ .endr
23378+
23379+2: SET_RDI_INTO_CR3
23380+
23381+#ifdef CONFIG_PAX_KERNEXEC
23382+ GET_CR0_INTO_RDI
23383+ bts $16,%rdi
23384+ SET_RDI_INTO_CR0
23385+#endif
23386+
23387+3:
23388+
23389+#ifdef CONFIG_PARAVIRT
23390+ PV_RESTORE_REGS(CLBR_RDI)
23391+#endif
23392+
23393+ popq %rbx
23394+ popq %rdi
23395+ pax_force_retaddr
23396+ retq
23397+4: ud2
23398+ENDPROC(pax_enter_kernel_user)
23399+
23400+ENTRY(pax_exit_kernel_user)
23401+ pushq %rdi
23402+ pushq %rbx
23403+
23404+#ifdef CONFIG_PARAVIRT
23405+ PV_SAVE_REGS(CLBR_RDI)
23406+#endif
23407+
23408+ GET_CR3_INTO_RDI
23409+ 661: jmp 1f
23410+ .pushsection .altinstr_replacement, "a"
23411+ 662: ASM_NOP2
23412+ .popsection
23413+ .pushsection .altinstructions, "a"
23414+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23415+ .popsection
23416+ cmp $0,%dil
23417+ jnz 3f
23418+ add $4097,%rdi
23419+ bts $63,%rdi
23420+ SET_RDI_INTO_CR3
23421+ jmp 2f
23422+1:
23423+
23424+ mov %rdi,%rbx
23425+
23426+#ifdef CONFIG_PAX_KERNEXEC
23427+ GET_CR0_INTO_RDI
23428+ btr $16,%rdi
23429+ jnc 3f
23430+ SET_RDI_INTO_CR0
23431+#endif
23432+
23433+ add $__START_KERNEL_map,%rbx
23434+ sub phys_base(%rip),%rbx
23435+
23436+#ifdef CONFIG_PARAVIRT
23437+ cmpl $0, pv_info+PARAVIRT_enabled
23438+ jz 1f
23439+ i = 0
23440+ .rept USER_PGD_PTRS
23441+ mov i*8(%rbx),%rsi
23442+ mov $0x67,%sil
23443+ lea i*8(%rbx),%rdi
23444+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23445+ i = i + 1
23446+ .endr
23447+ jmp 2f
23448+1:
23449+#endif
23450+
23451+ i = 0
23452+ .rept USER_PGD_PTRS
23453+ movb $0x67,i*8(%rbx)
23454+ i = i + 1
23455+ .endr
23456+2:
23457+
23458+#ifdef CONFIG_PARAVIRT
23459+ PV_RESTORE_REGS(CLBR_RDI)
23460+#endif
23461+
23462+ popq %rbx
23463+ popq %rdi
23464+ pax_force_retaddr
23465+ retq
23466+3: ud2
23467+ENDPROC(pax_exit_kernel_user)
23468+#endif
23469+
23470+ .macro pax_enter_kernel_nmi
23471+ pax_set_fptr_mask
23472+
23473+#ifdef CONFIG_PAX_KERNEXEC
23474+ GET_CR0_INTO_RDI
23475+ bts $16,%rdi
23476+ jc 110f
23477+ SET_RDI_INTO_CR0
23478+ or $2,%ebx
23479+110:
23480+#endif
23481+
23482+#ifdef CONFIG_PAX_MEMORY_UDEREF
23483+ 661: jmp 111f
23484+ .pushsection .altinstr_replacement, "a"
23485+ 662: ASM_NOP2
23486+ .popsection
23487+ .pushsection .altinstructions, "a"
23488+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23489+ .popsection
23490+ GET_CR3_INTO_RDI
23491+ cmp $0,%dil
23492+ jz 111f
23493+ sub $4097,%rdi
23494+ or $4,%ebx
23495+ bts $63,%rdi
23496+ SET_RDI_INTO_CR3
23497+ mov $__UDEREF_KERNEL_DS,%edi
23498+ mov %edi,%ss
23499+111:
23500+#endif
23501+ .endm
23502+
23503+ .macro pax_exit_kernel_nmi
23504+#ifdef CONFIG_PAX_KERNEXEC
23505+ btr $1,%ebx
23506+ jnc 110f
23507+ GET_CR0_INTO_RDI
23508+ btr $16,%rdi
23509+ SET_RDI_INTO_CR0
23510+110:
23511+#endif
23512+
23513+#ifdef CONFIG_PAX_MEMORY_UDEREF
23514+ btr $2,%ebx
23515+ jnc 111f
23516+ GET_CR3_INTO_RDI
23517+ add $4097,%rdi
23518+ bts $63,%rdi
23519+ SET_RDI_INTO_CR3
23520+ mov $__KERNEL_DS,%edi
23521+ mov %edi,%ss
23522+111:
23523+#endif
23524+ .endm
23525+
23526+ .macro pax_erase_kstack
23527+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23528+ call pax_erase_kstack
23529+#endif
23530+ .endm
23531+
23532+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23533+ENTRY(pax_erase_kstack)
23534+ pushq %rdi
23535+ pushq %rcx
23536+ pushq %rax
23537+ pushq %r11
23538+
23539+ GET_THREAD_INFO(%r11)
23540+ mov TI_lowest_stack(%r11), %rdi
23541+ mov $-0xBEEF, %rax
23542+ std
23543+
23544+1: mov %edi, %ecx
23545+ and $THREAD_SIZE_asm - 1, %ecx
23546+ shr $3, %ecx
23547+ repne scasq
23548+ jecxz 2f
23549+
23550+ cmp $2*8, %ecx
23551+ jc 2f
23552+
23553+ mov $2*8, %ecx
23554+ repe scasq
23555+ jecxz 2f
23556+ jne 1b
23557+
23558+2: cld
23559+ or $2*8, %rdi
23560+ mov %esp, %ecx
23561+ sub %edi, %ecx
23562+
23563+ cmp $THREAD_SIZE_asm, %rcx
23564+ jb 3f
23565+ ud2
23566+3:
23567+
23568+ shr $3, %ecx
23569+ rep stosq
23570+
23571+ mov TI_task_thread_sp0(%r11), %rdi
23572+ sub $256, %rdi
23573+ mov %rdi, TI_lowest_stack(%r11)
23574+
23575+ popq %r11
23576+ popq %rax
23577+ popq %rcx
23578+ popq %rdi
23579+ pax_force_retaddr
23580+ ret
23581+ENDPROC(pax_erase_kstack)
23582+#endif
23583
23584 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23585 #ifdef CONFIG_TRACE_IRQFLAGS
23586@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23587 .endm
23588
23589 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23590- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23591+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23592 jnc 1f
23593 TRACE_IRQS_ON_DEBUG
23594 1:
23595@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23596 movq \tmp,R11+\offset(%rsp)
23597 .endm
23598
23599- .macro FAKE_STACK_FRAME child_rip
23600- /* push in order ss, rsp, eflags, cs, rip */
23601- xorl %eax, %eax
23602- pushq_cfi $__KERNEL_DS /* ss */
23603- /*CFI_REL_OFFSET ss,0*/
23604- pushq_cfi %rax /* rsp */
23605- CFI_REL_OFFSET rsp,0
23606- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23607- /*CFI_REL_OFFSET rflags,0*/
23608- pushq_cfi $__KERNEL_CS /* cs */
23609- /*CFI_REL_OFFSET cs,0*/
23610- pushq_cfi \child_rip /* rip */
23611- CFI_REL_OFFSET rip,0
23612- pushq_cfi %rax /* orig rax */
23613- .endm
23614-
23615- .macro UNFAKE_STACK_FRAME
23616- addq $8*6, %rsp
23617- CFI_ADJUST_CFA_OFFSET -(6*8)
23618- .endm
23619-
23620 /*
23621 * initial frame state for interrupts (and exceptions without error code)
23622 */
23623@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23624 /* save partial stack frame */
23625 .macro SAVE_ARGS_IRQ
23626 cld
23627- /* start from rbp in pt_regs and jump over */
23628- movq_cfi rdi, (RDI-RBP)
23629- movq_cfi rsi, (RSI-RBP)
23630- movq_cfi rdx, (RDX-RBP)
23631- movq_cfi rcx, (RCX-RBP)
23632- movq_cfi rax, (RAX-RBP)
23633- movq_cfi r8, (R8-RBP)
23634- movq_cfi r9, (R9-RBP)
23635- movq_cfi r10, (R10-RBP)
23636- movq_cfi r11, (R11-RBP)
23637+ /* start from r15 in pt_regs and jump over */
23638+ movq_cfi rdi, RDI
23639+ movq_cfi rsi, RSI
23640+ movq_cfi rdx, RDX
23641+ movq_cfi rcx, RCX
23642+ movq_cfi rax, RAX
23643+ movq_cfi r8, R8
23644+ movq_cfi r9, R9
23645+ movq_cfi r10, R10
23646+ movq_cfi r11, R11
23647+ movq_cfi r12, R12
23648
23649 /* Save rbp so that we can unwind from get_irq_regs() */
23650- movq_cfi rbp, 0
23651+ movq_cfi rbp, RBP
23652
23653 /* Save previous stack value */
23654 movq %rsp, %rsi
23655
23656- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23657- testl $3, CS-RBP(%rsi)
23658+ movq %rsp,%rdi /* arg1 for handler */
23659+ testb $3, CS(%rsi)
23660 je 1f
23661 SWAPGS
23662 /*
23663@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23664 0x06 /* DW_OP_deref */, \
23665 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23666 0x22 /* DW_OP_plus */
23667+
23668+#ifdef CONFIG_PAX_MEMORY_UDEREF
23669+ testb $3, CS(%rdi)
23670+ jnz 1f
23671+ pax_enter_kernel
23672+ jmp 2f
23673+1: pax_enter_kernel_user
23674+2:
23675+#else
23676+ pax_enter_kernel
23677+#endif
23678+
23679 /* We entered an interrupt context - irqs are off: */
23680 TRACE_IRQS_OFF
23681 .endm
23682@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23683 js 1f /* negative -> in kernel */
23684 SWAPGS
23685 xorl %ebx,%ebx
23686-1: ret
23687+1:
23688+#ifdef CONFIG_PAX_MEMORY_UDEREF
23689+ testb $3, CS+8(%rsp)
23690+ jnz 1f
23691+ pax_enter_kernel
23692+ jmp 2f
23693+1: pax_enter_kernel_user
23694+2:
23695+#else
23696+ pax_enter_kernel
23697+#endif
23698+ pax_force_retaddr
23699+ ret
23700 CFI_ENDPROC
23701-END(save_paranoid)
23702+ENDPROC(save_paranoid)
23703+
23704+ENTRY(save_paranoid_nmi)
23705+ XCPT_FRAME 1 RDI+8
23706+ cld
23707+ movq_cfi rdi, RDI+8
23708+ movq_cfi rsi, RSI+8
23709+ movq_cfi rdx, RDX+8
23710+ movq_cfi rcx, RCX+8
23711+ movq_cfi rax, RAX+8
23712+ movq_cfi r8, R8+8
23713+ movq_cfi r9, R9+8
23714+ movq_cfi r10, R10+8
23715+ movq_cfi r11, R11+8
23716+ movq_cfi rbx, RBX+8
23717+ movq_cfi rbp, RBP+8
23718+ movq_cfi r12, R12+8
23719+ movq_cfi r13, R13+8
23720+ movq_cfi r14, R14+8
23721+ movq_cfi r15, R15+8
23722+ movl $1,%ebx
23723+ movl $MSR_GS_BASE,%ecx
23724+ rdmsr
23725+ testl %edx,%edx
23726+ js 1f /* negative -> in kernel */
23727+ SWAPGS
23728+ xorl %ebx,%ebx
23729+1: pax_enter_kernel_nmi
23730+ pax_force_retaddr
23731+ ret
23732+ CFI_ENDPROC
23733+ENDPROC(save_paranoid_nmi)
23734
23735 /*
23736 * A newly forked process directly context switches into this address.
23737@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
23738
23739 RESTORE_REST
23740
23741- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23742+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23743 jz 1f
23744
23745 /*
23746@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
23747 jmp int_ret_from_sys_call
23748
23749 1:
23750- subq $REST_SKIP, %rsp # leave space for volatiles
23751- CFI_ADJUST_CFA_OFFSET REST_SKIP
23752 movq %rbp, %rdi
23753 call *%rbx
23754 movl $0, RAX(%rsp)
23755 RESTORE_REST
23756 jmp int_ret_from_sys_call
23757 CFI_ENDPROC
23758-END(ret_from_fork)
23759+ENDPROC(ret_from_fork)
23760
23761 /*
23762 * System call entry. Up to 6 arguments in registers are supported.
23763@@ -389,7 +849,7 @@ END(ret_from_fork)
23764 ENTRY(system_call)
23765 CFI_STARTPROC simple
23766 CFI_SIGNAL_FRAME
23767- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23768+ CFI_DEF_CFA rsp,0
23769 CFI_REGISTER rip,rcx
23770 /*CFI_REGISTER rflags,r11*/
23771 SWAPGS_UNSAFE_STACK
23772@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23773
23774 movq %rsp,PER_CPU_VAR(old_rsp)
23775 movq PER_CPU_VAR(kernel_stack),%rsp
23776+ SAVE_ARGS 8*6, 0, rax_enosys=1
23777+ pax_enter_kernel_user
23778+
23779+#ifdef CONFIG_PAX_RANDKSTACK
23780+ pax_erase_kstack
23781+#endif
23782+
23783 /*
23784 * No need to follow this irqs off/on section - it's straight
23785 * and short:
23786 */
23787 ENABLE_INTERRUPTS(CLBR_NONE)
23788- SAVE_ARGS 8, 0, rax_enosys=1
23789 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23790 movq %rcx,RIP-ARGOFFSET(%rsp)
23791 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23792- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23793+ GET_THREAD_INFO(%rcx)
23794+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23795 jnz tracesys
23796 system_call_fastpath:
23797 #if __SYSCALL_MASK == ~0
23798@@ -435,10 +902,13 @@ sysret_check:
23799 LOCKDEP_SYS_EXIT
23800 DISABLE_INTERRUPTS(CLBR_NONE)
23801 TRACE_IRQS_OFF
23802- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23803+ GET_THREAD_INFO(%rcx)
23804+ movl TI_flags(%rcx),%edx
23805 andl %edi,%edx
23806 jnz sysret_careful
23807 CFI_REMEMBER_STATE
23808+ pax_exit_kernel_user
23809+ pax_erase_kstack
23810 /*
23811 * sysretq will re-enable interrupts:
23812 */
23813@@ -497,12 +967,15 @@ sysret_audit:
23814
23815 /* Do syscall tracing */
23816 tracesys:
23817- leaq -REST_SKIP(%rsp), %rdi
23818+ movq %rsp, %rdi
23819 movq $AUDIT_ARCH_X86_64, %rsi
23820 call syscall_trace_enter_phase1
23821 test %rax, %rax
23822 jnz tracesys_phase2 /* if needed, run the slow path */
23823- LOAD_ARGS 0 /* else restore clobbered regs */
23824+
23825+ pax_erase_kstack
23826+
23827+ LOAD_ARGS /* else restore clobbered regs */
23828 jmp system_call_fastpath /* and return to the fast path */
23829
23830 tracesys_phase2:
23831@@ -513,12 +986,14 @@ tracesys_phase2:
23832 movq %rax,%rdx
23833 call syscall_trace_enter_phase2
23834
23835+ pax_erase_kstack
23836+
23837 /*
23838 * Reload arg registers from stack in case ptrace changed them.
23839 * We don't reload %rax because syscall_trace_entry_phase2() returned
23840 * the value it wants us to use in the table lookup.
23841 */
23842- LOAD_ARGS ARGOFFSET, 1
23843+ LOAD_ARGS 1
23844 RESTORE_REST
23845 #if __SYSCALL_MASK == ~0
23846 cmpq $__NR_syscall_max,%rax
23847@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
23848 andl %edi,%edx
23849 jnz int_careful
23850 andl $~TS_COMPAT,TI_status(%rcx)
23851- jmp retint_swapgs
23852+ pax_exit_kernel_user
23853+ pax_erase_kstack
23854+ jmp retint_swapgs_pax
23855
23856 /* Either reschedule or signal or syscall exit tracking needed. */
23857 /* First do a reschedule test. */
23858@@ -594,7 +1071,7 @@ int_restore_rest:
23859 TRACE_IRQS_OFF
23860 jmp int_with_check
23861 CFI_ENDPROC
23862-END(system_call)
23863+ENDPROC(system_call)
23864
23865 .macro FORK_LIKE func
23866 ENTRY(stub_\func)
23867@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
23868 DEFAULT_FRAME 0 8 /* offset 8: return address */
23869 call sys_\func
23870 RESTORE_TOP_OF_STACK %r11, 8
23871- ret $REST_SKIP /* pop extended registers */
23872+ pax_force_retaddr
23873+ ret
23874 CFI_ENDPROC
23875-END(stub_\func)
23876+ENDPROC(stub_\func)
23877 .endm
23878
23879 .macro FIXED_FRAME label,func
23880@@ -619,9 +1097,10 @@ ENTRY(\label)
23881 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23882 call \func
23883 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23884+ pax_force_retaddr
23885 ret
23886 CFI_ENDPROC
23887-END(\label)
23888+ENDPROC(\label)
23889 .endm
23890
23891 FORK_LIKE clone
23892@@ -629,19 +1108,6 @@ END(\label)
23893 FORK_LIKE vfork
23894 FIXED_FRAME stub_iopl, sys_iopl
23895
23896-ENTRY(ptregscall_common)
23897- DEFAULT_FRAME 1 8 /* offset 8: return address */
23898- RESTORE_TOP_OF_STACK %r11, 8
23899- movq_cfi_restore R15+8, r15
23900- movq_cfi_restore R14+8, r14
23901- movq_cfi_restore R13+8, r13
23902- movq_cfi_restore R12+8, r12
23903- movq_cfi_restore RBP+8, rbp
23904- movq_cfi_restore RBX+8, rbx
23905- ret $REST_SKIP /* pop extended registers */
23906- CFI_ENDPROC
23907-END(ptregscall_common)
23908-
23909 ENTRY(stub_execve)
23910 CFI_STARTPROC
23911 addq $8, %rsp
23912@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
23913 RESTORE_REST
23914 jmp int_ret_from_sys_call
23915 CFI_ENDPROC
23916-END(stub_execve)
23917+ENDPROC(stub_execve)
23918
23919 ENTRY(stub_execveat)
23920 CFI_STARTPROC
23921@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
23922 RESTORE_REST
23923 jmp int_ret_from_sys_call
23924 CFI_ENDPROC
23925-END(stub_execveat)
23926+ENDPROC(stub_execveat)
23927
23928 /*
23929 * sigreturn is special because it needs to restore all registers on return.
23930@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23931 RESTORE_REST
23932 jmp int_ret_from_sys_call
23933 CFI_ENDPROC
23934-END(stub_rt_sigreturn)
23935+ENDPROC(stub_rt_sigreturn)
23936
23937 #ifdef CONFIG_X86_X32_ABI
23938 ENTRY(stub_x32_rt_sigreturn)
23939@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23940 RESTORE_REST
23941 jmp int_ret_from_sys_call
23942 CFI_ENDPROC
23943-END(stub_x32_rt_sigreturn)
23944+ENDPROC(stub_x32_rt_sigreturn)
23945
23946 ENTRY(stub_x32_execve)
23947 CFI_STARTPROC
23948@@ -763,7 +1229,7 @@ vector=vector+1
23949 2: jmp common_interrupt
23950 .endr
23951 CFI_ENDPROC
23952-END(irq_entries_start)
23953+ENDPROC(irq_entries_start)
23954
23955 .previous
23956 END(interrupt)
23957@@ -780,8 +1246,8 @@ END(interrupt)
23958 /* 0(%rsp): ~(interrupt number) */
23959 .macro interrupt func
23960 /* reserve pt_regs for scratch regs and rbp */
23961- subq $ORIG_RAX-RBP, %rsp
23962- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23963+ subq $ORIG_RAX, %rsp
23964+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23965 SAVE_ARGS_IRQ
23966 call \func
23967 .endm
23968@@ -804,14 +1270,14 @@ ret_from_intr:
23969
23970 /* Restore saved previous stack */
23971 popq %rsi
23972- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23973- leaq ARGOFFSET-RBP(%rsi), %rsp
23974+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23975+ movq %rsi, %rsp
23976 CFI_DEF_CFA_REGISTER rsp
23977- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23978+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23979
23980 exit_intr:
23981 GET_THREAD_INFO(%rcx)
23982- testl $3,CS-ARGOFFSET(%rsp)
23983+ testb $3,CS-ARGOFFSET(%rsp)
23984 je retint_kernel
23985
23986 /* Interrupt came from user space */
23987@@ -833,12 +1299,35 @@ retint_swapgs: /* return to user-space */
23988 * The iretq could re-enable interrupts:
23989 */
23990 DISABLE_INTERRUPTS(CLBR_ANY)
23991+ pax_exit_kernel_user
23992+retint_swapgs_pax:
23993 TRACE_IRQS_IRETQ
23994 SWAPGS
23995 jmp restore_args
23996
23997 retint_restore_args: /* return to kernel space */
23998 DISABLE_INTERRUPTS(CLBR_ANY)
23999+ pax_exit_kernel
24000+
24001+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24002+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24003+ * namely calling EFI runtime services with a phys mapping. We're
24004+ * starting off with NOPs and patch in the real instrumentation
24005+ * (BTS/OR) before starting any userland process; even before starting
24006+ * up the APs.
24007+ */
24008+ .pushsection .altinstr_replacement, "a"
24009+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24010+ 602:
24011+ .popsection
24012+ 603: .fill 602b-601b, 1, 0x90
24013+ .pushsection .altinstructions, "a"
24014+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24015+ .popsection
24016+#else
24017+ pax_force_retaddr (RIP-ARGOFFSET)
24018+#endif
24019+
24020 /*
24021 * The iretq could re-enable interrupts:
24022 */
24023@@ -876,15 +1365,15 @@ native_irq_return_ldt:
24024 SWAPGS
24025 movq PER_CPU_VAR(espfix_waddr),%rdi
24026 movq %rax,(0*8)(%rdi) /* RAX */
24027- movq (2*8)(%rsp),%rax /* RIP */
24028+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
24029 movq %rax,(1*8)(%rdi)
24030- movq (3*8)(%rsp),%rax /* CS */
24031+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
24032 movq %rax,(2*8)(%rdi)
24033- movq (4*8)(%rsp),%rax /* RFLAGS */
24034+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
24035 movq %rax,(3*8)(%rdi)
24036- movq (6*8)(%rsp),%rax /* SS */
24037+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
24038 movq %rax,(5*8)(%rdi)
24039- movq (5*8)(%rsp),%rax /* RSP */
24040+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
24041 movq %rax,(4*8)(%rdi)
24042 andl $0xffff0000,%eax
24043 popq_cfi %rdi
24044@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
24045 jmp exit_intr
24046 #endif
24047 CFI_ENDPROC
24048-END(common_interrupt)
24049+ENDPROC(common_interrupt)
24050
24051 /*
24052 * APIC interrupts.
24053@@ -952,7 +1441,7 @@ ENTRY(\sym)
24054 interrupt \do_sym
24055 jmp ret_from_intr
24056 CFI_ENDPROC
24057-END(\sym)
24058+ENDPROC(\sym)
24059 .endm
24060
24061 #ifdef CONFIG_TRACING
24062@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24063 /*
24064 * Exception entry points.
24065 */
24066-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24067+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24068
24069 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24070 ENTRY(\sym)
24071@@ -1076,6 +1565,12 @@ ENTRY(\sym)
24072 .endif
24073
24074 .if \shift_ist != -1
24075+#ifdef CONFIG_SMP
24076+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24077+ lea init_tss(%r13), %r13
24078+#else
24079+ lea init_tss(%rip), %r13
24080+#endif
24081 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24082 .endif
24083
24084@@ -1092,7 +1587,7 @@ ENTRY(\sym)
24085 .endif
24086
24087 CFI_ENDPROC
24088-END(\sym)
24089+ENDPROC(\sym)
24090 .endm
24091
24092 #ifdef CONFIG_TRACING
24093@@ -1133,9 +1628,10 @@ gs_change:
24094 2: mfence /* workaround */
24095 SWAPGS
24096 popfq_cfi
24097+ pax_force_retaddr
24098 ret
24099 CFI_ENDPROC
24100-END(native_load_gs_index)
24101+ENDPROC(native_load_gs_index)
24102
24103 _ASM_EXTABLE(gs_change,bad_gs)
24104 .section .fixup,"ax"
24105@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24106 CFI_DEF_CFA_REGISTER rsp
24107 CFI_ADJUST_CFA_OFFSET -8
24108 decl PER_CPU_VAR(irq_count)
24109+ pax_force_retaddr
24110 ret
24111 CFI_ENDPROC
24112-END(do_softirq_own_stack)
24113+ENDPROC(do_softirq_own_stack)
24114
24115 #ifdef CONFIG_XEN
24116 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24117@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24118 decl PER_CPU_VAR(irq_count)
24119 jmp error_exit
24120 CFI_ENDPROC
24121-END(xen_do_hypervisor_callback)
24122+ENDPROC(xen_do_hypervisor_callback)
24123
24124 /*
24125 * Hypervisor uses this for application faults while it executes.
24126@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24127 SAVE_ALL
24128 jmp error_exit
24129 CFI_ENDPROC
24130-END(xen_failsafe_callback)
24131+ENDPROC(xen_failsafe_callback)
24132
24133 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24134 xen_hvm_callback_vector xen_evtchn_do_upcall
24135@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
24136 DEFAULT_FRAME
24137 DISABLE_INTERRUPTS(CLBR_NONE)
24138 TRACE_IRQS_OFF_DEBUG
24139- testl %ebx,%ebx /* swapgs needed? */
24140+ testl $1,%ebx /* swapgs needed? */
24141 jnz paranoid_restore
24142- testl $3,CS(%rsp)
24143+ testb $3,CS(%rsp)
24144 jnz paranoid_userspace
24145+#ifdef CONFIG_PAX_MEMORY_UDEREF
24146+ pax_exit_kernel
24147+ TRACE_IRQS_IRETQ 0
24148+ SWAPGS_UNSAFE_STACK
24149+ RESTORE_ALL 8
24150+ pax_force_retaddr_bts
24151+ jmp irq_return
24152+#endif
24153 paranoid_swapgs:
24154+#ifdef CONFIG_PAX_MEMORY_UDEREF
24155+ pax_exit_kernel_user
24156+#else
24157+ pax_exit_kernel
24158+#endif
24159 TRACE_IRQS_IRETQ 0
24160 SWAPGS_UNSAFE_STACK
24161 RESTORE_ALL 8
24162 jmp irq_return
24163 paranoid_restore:
24164+ pax_exit_kernel
24165 TRACE_IRQS_IRETQ_DEBUG 0
24166 RESTORE_ALL 8
24167+ pax_force_retaddr_bts
24168 jmp irq_return
24169 paranoid_userspace:
24170 GET_THREAD_INFO(%rcx)
24171@@ -1349,7 +1861,7 @@ paranoid_schedule:
24172 TRACE_IRQS_OFF
24173 jmp paranoid_userspace
24174 CFI_ENDPROC
24175-END(paranoid_exit)
24176+ENDPROC(paranoid_exit)
24177
24178 /*
24179 * Exception entry point. This expects an error code/orig_rax on the stack.
24180@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
24181 movq %r14, R14+8(%rsp)
24182 movq %r15, R15+8(%rsp)
24183 xorl %ebx,%ebx
24184- testl $3,CS+8(%rsp)
24185+ testb $3,CS+8(%rsp)
24186 je error_kernelspace
24187 error_swapgs:
24188 SWAPGS
24189 error_sti:
24190+#ifdef CONFIG_PAX_MEMORY_UDEREF
24191+ testb $3, CS+8(%rsp)
24192+ jnz 1f
24193+ pax_enter_kernel
24194+ jmp 2f
24195+1: pax_enter_kernel_user
24196+2:
24197+#else
24198+ pax_enter_kernel
24199+#endif
24200 TRACE_IRQS_OFF
24201+ pax_force_retaddr
24202 ret
24203
24204 /*
24205@@ -1416,7 +1939,7 @@ error_bad_iret:
24206 decl %ebx /* Return to usergs */
24207 jmp error_sti
24208 CFI_ENDPROC
24209-END(error_entry)
24210+ENDPROC(error_entry)
24211
24212
24213 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24214@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
24215 DISABLE_INTERRUPTS(CLBR_NONE)
24216 TRACE_IRQS_OFF
24217 GET_THREAD_INFO(%rcx)
24218- testl %eax,%eax
24219+ testl $1,%eax
24220 jne retint_kernel
24221 LOCKDEP_SYS_EXIT_IRQ
24222 movl TI_flags(%rcx),%edx
24223@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
24224 jnz retint_careful
24225 jmp retint_swapgs
24226 CFI_ENDPROC
24227-END(error_exit)
24228+ENDPROC(error_exit)
24229
24230 /*
24231 * Test if a given stack is an NMI stack or not.
24232@@ -1494,9 +2017,11 @@ ENTRY(nmi)
24233 * If %cs was not the kernel segment, then the NMI triggered in user
24234 * space, which means it is definitely not nested.
24235 */
24236+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24237+ je 1f
24238 cmpl $__KERNEL_CS, 16(%rsp)
24239 jne first_nmi
24240-
24241+1:
24242 /*
24243 * Check the special variable on the stack to see if NMIs are
24244 * executing.
24245@@ -1530,8 +2055,7 @@ nested_nmi:
24246
24247 1:
24248 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24249- leaq -1*8(%rsp), %rdx
24250- movq %rdx, %rsp
24251+ subq $8, %rsp
24252 CFI_ADJUST_CFA_OFFSET 1*8
24253 leaq -10*8(%rsp), %rdx
24254 pushq_cfi $__KERNEL_DS
24255@@ -1549,6 +2073,7 @@ nested_nmi_out:
24256 CFI_RESTORE rdx
24257
24258 /* No need to check faults here */
24259+# pax_force_retaddr_bts
24260 INTERRUPT_RETURN
24261
24262 CFI_RESTORE_STATE
24263@@ -1645,13 +2170,13 @@ end_repeat_nmi:
24264 subq $ORIG_RAX-R15, %rsp
24265 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24266 /*
24267- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24268+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24269 * as we should not be calling schedule in NMI context.
24270 * Even with normal interrupts enabled. An NMI should not be
24271 * setting NEED_RESCHED or anything that normal interrupts and
24272 * exceptions might do.
24273 */
24274- call save_paranoid
24275+ call save_paranoid_nmi
24276 DEFAULT_FRAME 0
24277
24278 /*
24279@@ -1661,9 +2186,9 @@ end_repeat_nmi:
24280 * NMI itself takes a page fault, the page fault that was preempted
24281 * will read the information from the NMI page fault and not the
24282 * origin fault. Save it off and restore it if it changes.
24283- * Use the r12 callee-saved register.
24284+ * Use the r13 callee-saved register.
24285 */
24286- movq %cr2, %r12
24287+ movq %cr2, %r13
24288
24289 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24290 movq %rsp,%rdi
24291@@ -1672,29 +2197,34 @@ end_repeat_nmi:
24292
24293 /* Did the NMI take a page fault? Restore cr2 if it did */
24294 movq %cr2, %rcx
24295- cmpq %rcx, %r12
24296+ cmpq %rcx, %r13
24297 je 1f
24298- movq %r12, %cr2
24299+ movq %r13, %cr2
24300 1:
24301
24302- testl %ebx,%ebx /* swapgs needed? */
24303+ testl $1,%ebx /* swapgs needed? */
24304 jnz nmi_restore
24305 nmi_swapgs:
24306 SWAPGS_UNSAFE_STACK
24307 nmi_restore:
24308+ pax_exit_kernel_nmi
24309 /* Pop the extra iret frame at once */
24310 RESTORE_ALL 6*8
24311+ testb $3, 8(%rsp)
24312+ jnz 1f
24313+ pax_force_retaddr_bts
24314+1:
24315
24316 /* Clear the NMI executing stack variable */
24317 movq $0, 5*8(%rsp)
24318 jmp irq_return
24319 CFI_ENDPROC
24320-END(nmi)
24321+ENDPROC(nmi)
24322
24323 ENTRY(ignore_sysret)
24324 CFI_STARTPROC
24325 mov $-ENOSYS,%eax
24326 sysret
24327 CFI_ENDPROC
24328-END(ignore_sysret)
24329+ENDPROC(ignore_sysret)
24330
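The entry_64.S hunks above add the PaX entry/exit hooks: pax_enter_kernel/pax_exit_kernel toggle CR0.WP (bit 16) for KERNEXEC and switch between the kernel and shadow user page tables (the +/-4097 CR3 adjustment) for UDEREF, while pax_erase_kstack implements the STACKLEAK stack sanitization. The erase routine scans downward from the deepest stack pointer the thread recorded (TI_lowest_stack) until it sees a run of poison qwords left by the previous erase, then re-poisons everything from there up to the live stack pointer. A minimal userspace model of that scan, with our own names and the loop structure simplified ($-0xBEEF is 0xffffffffffff4111 as a qword):

    #include <stdint.h>
    #include <stddef.h>

    #define POISON   0xffffffffffff4111ULL  /* $-0xBEEF as a 64-bit qword */
    #define RUN_LEN  16                     /* the asm verifies 2*8 poison qwords */

    /* Find the index just above the newest run of RUN_LEN poison qwords at
     * or below 'lowest'; models the repne/repe scasq loop, simplified. */
    static size_t erase_start(const uint64_t *stack, size_t lowest)
    {
            size_t i = lowest;

            while (i > 0) {
                    while (i > 0 && stack[i - 1] != POISON)  /* repne scasq */
                            i--;
                    size_t run = 0;
                    while (i > 0 && run < RUN_LEN && stack[i - 1] == POISON) {
                            i--;                             /* repe scasq */
                            run++;
                    }
                    if (run == RUN_LEN)
                            return i + RUN_LEN;
            }
            return 0;
    }

    /* Re-poison everything between the found boundary and the live stack
     * pointer; models the rep stosq that wipes stale call frames. */
    static void erase_kstack(uint64_t *stack, size_t lowest, size_t top)
    {
            for (size_t i = erase_start(stack, lowest); i < top; i++)
                    stack[i] = POISON;
    }

    int main(void)
    {
            uint64_t stk[64];
            for (size_t i = 0; i < 64; i++)
                    stk[i] = i < 20 ? POISON : 0xdeadbeefULL;
            erase_kstack(stk, 24, 48);  /* deepest use at 24, live top at 48 */
            return stk[30] == POISON ? 0 : 1;
    }

The RUN_LEN requirement is why the assembly re-scans with repe scasq: a single in-stack value that happens to equal the poison must not be mistaken for the boundary and truncate the erase.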
24331diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24332index f5d0730..5bce89c 100644
24333--- a/arch/x86/kernel/espfix_64.c
24334+++ b/arch/x86/kernel/espfix_64.c
24335@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24336 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24337 static void *espfix_pages[ESPFIX_MAX_PAGES];
24338
24339-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24340- __aligned(PAGE_SIZE);
24341+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24342
24343 static unsigned int page_random, slot_random;
24344
24345@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24346 void __init init_espfix_bsp(void)
24347 {
24348 pgd_t *pgd_p;
24349+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24350
24351 /* Install the espfix pud into the kernel page directory */
24352- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24353+ pgd_p = &init_level4_pgt[index];
24354 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24355
24356+#ifdef CONFIG_PAX_PER_CPU_PGD
24357+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24358+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24359+#endif
24360+
24361 /* Randomize the locations */
24362 init_espfix_random();
24363
24364@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24365 set_pte(&pte_p[n*PTE_STRIDE], pte);
24366
24367 /* Job is done for this CPU and any CPU which shares this page */
24368- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24369+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24370
24371 unlock_done:
24372 mutex_unlock(&espfix_init_mutex);
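With PAX_PER_CPU_PGD each CPU runs on its own kernel and user PGDs rather than on swapper_pg_dir, so a mapping installed only in the init page tables would be invisible to the running CPU; the two clone_pgd_range() calls copy the freshly populated espfix entry into the boot CPU's pair. For reference, clone_pgd_range() (from arch/x86/include/asm/pgtable.h) is just a memcpy over top-level slots; the pgd_t typedef below is a stand-in so the snippet compiles on its own:

    #include <string.h>

    typedef struct { unsigned long pgd; } pgd_t;

    /* Copy 'count' page-global-directory entries from src to dst. */
    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }

Secondary CPUs are covered elsewhere: their per-CPU PGDs are built by copying from the reference tables after the entry exists there.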
24373diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24374index 8b7b0a5..2395f29 100644
24375--- a/arch/x86/kernel/ftrace.c
24376+++ b/arch/x86/kernel/ftrace.c
24377@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24378 * kernel identity mapping to modify code.
24379 */
24380 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24381- ip = (unsigned long)__va(__pa_symbol(ip));
24382+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24383
24384 return ip;
24385 }
24386@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24387 {
24388 unsigned char replaced[MCOUNT_INSN_SIZE];
24389
24390+ ip = ktla_ktva(ip);
24391+
24392 /*
24393 * Note: Due to modules and __init, code can
24394 * disappear and change, we need to protect against faulting
24395@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24396 unsigned char old[MCOUNT_INSN_SIZE];
24397 int ret;
24398
24399- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24400+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24401
24402 ftrace_update_func = ip;
24403 /* Make sure the breakpoints see the ftrace_update_func update */
24404@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24405 unsigned char replaced[MCOUNT_INSN_SIZE];
24406 unsigned char brk = BREAKPOINT_INSTRUCTION;
24407
24408- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24409+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24410 return -EFAULT;
24411
24412 /* Make sure it is what we expect it to be */
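Under KERNEXEC the 32-bit kernel text is kept read-only at the address it executes from, and code patching goes through a separate alias; ktla_ktva() ("kernel text linear address to kernel text virtual address") translates between the two and collapses to a no-op when KERNEXEC is off, which is why every read of instruction bytes in this file now goes through it. As a rough sketch from memory of the PaX definition (not verbatim; the constants are illustrative i386 defaults, and on x86_64 the macro is the identity):

    #include <assert.h>

    #define LOAD_PHYSICAL_ADDR 0x1000000UL  /* assumed: 16 MiB load address */
    #define PAGE_OFFSET        0xc0000000UL /* assumed: 3G/1G split */

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr)  ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #define ktva_ktla(addr)  ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
    #else
    #define ktla_ktva(addr)  (addr)
    #define ktva_ktla(addr)  (addr)
    #endif

    int main(void)
    {
            unsigned long ip = 0x100000UL;  /* some text address */
            assert(ktva_ktla(ktla_ktva(ip)) == ip);  /* round trip */
            return 0;
    }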
24413diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24414index eda1a86..8f6df48 100644
24415--- a/arch/x86/kernel/head64.c
24416+++ b/arch/x86/kernel/head64.c
24417@@ -67,12 +67,12 @@ again:
24418 pgd = *pgd_p;
24419
24420 /*
24421- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24422- * critical -- __PAGE_OFFSET would point us back into the dynamic
24423+ * The use of __early_va rather than __va here is critical:
24424+ * __va would point us back into the dynamic
24425 * range and we might end up looping forever...
24426 */
24427 if (pgd)
24428- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24429+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24430 else {
24431 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24432 reset_early_page_tables();
24433@@ -82,13 +82,13 @@ again:
24434 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24435 for (i = 0; i < PTRS_PER_PUD; i++)
24436 pud_p[i] = 0;
24437- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24438+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24439 }
24440 pud_p += pud_index(address);
24441 pud = *pud_p;
24442
24443 if (pud)
24444- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24445+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24446 else {
24447 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24448 reset_early_page_tables();
24449@@ -98,7 +98,7 @@ again:
24450 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24451 for (i = 0; i < PTRS_PER_PMD; i++)
24452 pmd_p[i] = 0;
24453- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24454+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24455 }
24456 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24457 pmd_p[pmd_index(address)] = pmd;
24458@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24459 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24460 early_printk("Kernel alive\n");
24461
24462- clear_page(init_level4_pgt);
24463 /* set init_level4_pgt kernel high mapping*/
24464 init_level4_pgt[511] = early_level4_pgt[511];
24465
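The rewritten early page-table walker factors the open-coded "phys + __START_KERNEL_map - phys_base" arithmetic into an __early_va() helper. At this stage only the kernel-image mapping exists, so the usual __va(), which relies on the direct map, cannot be used, exactly as the updated comment says. Judging from the expressions the hunk replaces, the helper amounts to the following (a sketch, with phys_base modeled as a plain variable):

    #include <stdint.h>
    #include <assert.h>

    #define __START_KERNEL_map 0xffffffff80000000ULL
    static uint64_t phys_base;  /* physical load offset, 0 if not relocated */

    /* Early-boot phys-to-virt through the kernel-image mapping. */
    #define __early_va(x) \
            ((void *)((uint64_t)(x) + __START_KERNEL_map - phys_base))

    int main(void)
    {
            /* a page-table entry's PFN part, as in "pgd & PTE_PFN_MASK" */
            uint64_t phys = 0x1234000ULL;
            assert((uint64_t)__early_va(phys) == phys + __START_KERNEL_map);
            return 0;
    }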
24466diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24467index f36bd42..0ab4474 100644
24468--- a/arch/x86/kernel/head_32.S
24469+++ b/arch/x86/kernel/head_32.S
24470@@ -26,6 +26,12 @@
24471 /* Physical address */
24472 #define pa(X) ((X) - __PAGE_OFFSET)
24473
24474+#ifdef CONFIG_PAX_KERNEXEC
24475+#define ta(X) (X)
24476+#else
24477+#define ta(X) ((X) - __PAGE_OFFSET)
24478+#endif
24479+
24480 /*
24481 * References to members of the new_cpu_data structure.
24482 */
24483@@ -55,11 +61,7 @@
24484 * and small than max_low_pfn, otherwise will waste some page table entries
24485 */
24486
24487-#if PTRS_PER_PMD > 1
24488-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24489-#else
24490-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24491-#endif
24492+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24493
24494 /* Number of possible pages in the lowmem region */
24495 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24496@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24497 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24498
24499 /*
24500+ * Real beginning of normal "text" segment
24501+ */
24502+ENTRY(stext)
24503+ENTRY(_stext)
24504+
24505+/*
24506 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24507 * %esi points to the real-mode code as a 32-bit pointer.
24508 * CS and DS must be 4 GB flat segments, but we don't depend on
24509@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24510 * can.
24511 */
24512 __HEAD
24513+
24514+#ifdef CONFIG_PAX_KERNEXEC
24515+ jmp startup_32
24516+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24517+.fill PAGE_SIZE-5,1,0xcc
24518+#endif
24519+
24520 ENTRY(startup_32)
24521 movl pa(stack_start),%ecx
24522
24523@@ -106,6 +121,59 @@ ENTRY(startup_32)
24524 2:
24525 leal -__PAGE_OFFSET(%ecx),%esp
24526
24527+#ifdef CONFIG_SMP
24528+ movl $pa(cpu_gdt_table),%edi
24529+ movl $__per_cpu_load,%eax
24530+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24531+ rorl $16,%eax
24532+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24533+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24534+ movl $__per_cpu_end - 1,%eax
24535+ subl $__per_cpu_start,%eax
24536+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24537+#endif
24538+
24539+#ifdef CONFIG_PAX_MEMORY_UDEREF
24540+ movl $NR_CPUS,%ecx
24541+ movl $pa(cpu_gdt_table),%edi
24542+1:
24543+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24544+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24545+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24546+ addl $PAGE_SIZE_asm,%edi
24547+ loop 1b
24548+#endif
24549+
24550+#ifdef CONFIG_PAX_KERNEXEC
24551+ movl $pa(boot_gdt),%edi
24552+ movl $__LOAD_PHYSICAL_ADDR,%eax
24553+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24554+ rorl $16,%eax
24555+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24556+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24557+ rorl $16,%eax
24558+
24559+ ljmp $(__BOOT_CS),$1f
24560+1:
24561+
24562+ movl $NR_CPUS,%ecx
24563+ movl $pa(cpu_gdt_table),%edi
24564+ addl $__PAGE_OFFSET,%eax
24565+1:
24566+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24567+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24568+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24569+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24570+ rorl $16,%eax
24571+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24572+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24573+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24574+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24575+ rorl $16,%eax
24576+ addl $PAGE_SIZE_asm,%edi
24577+ loop 1b
24578+#endif
24579+
24580 /*
24581 * Clear BSS first so that there are no surprises...
24582 */
24583@@ -201,8 +269,11 @@ ENTRY(startup_32)
24584 movl %eax, pa(max_pfn_mapped)
24585
24586 /* Do early initialization of the fixmap area */
24587- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24588- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24589+#ifdef CONFIG_COMPAT_VDSO
24590+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24591+#else
24592+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24593+#endif
24594 #else /* Not PAE */
24595
24596 page_pde_offset = (__PAGE_OFFSET >> 20);
24597@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24598 movl %eax, pa(max_pfn_mapped)
24599
24600 /* Do early initialization of the fixmap area */
24601- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24602- movl %eax,pa(initial_page_table+0xffc)
24603+#ifdef CONFIG_COMPAT_VDSO
24604+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24605+#else
24606+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24607+#endif
24608 #endif
24609
24610 #ifdef CONFIG_PARAVIRT
24611@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24612 cmpl $num_subarch_entries, %eax
24613 jae bad_subarch
24614
24615- movl pa(subarch_entries)(,%eax,4), %eax
24616- subl $__PAGE_OFFSET, %eax
24617- jmp *%eax
24618+ jmp *pa(subarch_entries)(,%eax,4)
24619
24620 bad_subarch:
24621 WEAK(lguest_entry)
24622@@ -261,10 +333,10 @@ WEAK(xen_entry)
24623 __INITDATA
24624
24625 subarch_entries:
24626- .long default_entry /* normal x86/PC */
24627- .long lguest_entry /* lguest hypervisor */
24628- .long xen_entry /* Xen hypervisor */
24629- .long default_entry /* Moorestown MID */
24630+ .long ta(default_entry) /* normal x86/PC */
24631+ .long ta(lguest_entry) /* lguest hypervisor */
24632+ .long ta(xen_entry) /* Xen hypervisor */
24633+ .long ta(default_entry) /* Moorestown MID */
24634 num_subarch_entries = (. - subarch_entries) / 4
24635 .previous
24636 #else
24637@@ -354,6 +426,7 @@ default_entry:
24638 movl pa(mmu_cr4_features),%eax
24639 movl %eax,%cr4
24640
24641+#ifdef CONFIG_X86_PAE
24642 testb $X86_CR4_PAE, %al # check if PAE is enabled
24643 jz enable_paging
24644
24645@@ -382,6 +455,9 @@ default_entry:
24646 /* Make changes effective */
24647 wrmsr
24648
24649+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24650+#endif
24651+
24652 enable_paging:
24653
24654 /*
24655@@ -449,14 +525,20 @@ is486:
24656 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24657 movl %eax,%ss # after changing gdt.
24658
24659- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24660+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24661 movl %eax,%ds
24662 movl %eax,%es
24663
24664 movl $(__KERNEL_PERCPU), %eax
24665 movl %eax,%fs # set this cpu's percpu
24666
24667+#ifdef CONFIG_CC_STACKPROTECTOR
24668 movl $(__KERNEL_STACK_CANARY),%eax
24669+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24670+ movl $(__USER_DS),%eax
24671+#else
24672+ xorl %eax,%eax
24673+#endif
24674 movl %eax,%gs
24675
24676 xorl %eax,%eax # Clear LDT
24677@@ -512,8 +594,11 @@ setup_once:
24678 * relocation. Manually set base address in stack canary
24679 * segment descriptor.
24680 */
24681- movl $gdt_page,%eax
24682+ movl $cpu_gdt_table,%eax
24683 movl $stack_canary,%ecx
24684+#ifdef CONFIG_SMP
24685+ addl $__per_cpu_load,%ecx
24686+#endif
24687 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24688 shrl $16, %ecx
24689 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24690@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24691 cmpl $2,(%esp) # X86_TRAP_NMI
24692 je is_nmi # Ignore NMI
24693
24694- cmpl $2,%ss:early_recursion_flag
24695+ cmpl $1,%ss:early_recursion_flag
24696 je hlt_loop
24697 incl %ss:early_recursion_flag
24698
24699@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24700 pushl (20+6*4)(%esp) /* trapno */
24701 pushl $fault_msg
24702 call printk
24703-#endif
24704 call dump_stack
24705+#endif
24706 hlt_loop:
24707 hlt
24708 jmp hlt_loop
24709@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24710 /* This is the default interrupt "handler" :-) */
24711 ALIGN
24712 ignore_int:
24713- cld
24714 #ifdef CONFIG_PRINTK
24715+ cmpl $2,%ss:early_recursion_flag
24716+ je hlt_loop
24717+ incl %ss:early_recursion_flag
24718+ cld
24719 pushl %eax
24720 pushl %ecx
24721 pushl %edx
24722@@ -617,9 +705,6 @@ ignore_int:
24723 movl $(__KERNEL_DS),%eax
24724 movl %eax,%ds
24725 movl %eax,%es
24726- cmpl $2,early_recursion_flag
24727- je hlt_loop
24728- incl early_recursion_flag
24729 pushl 16(%esp)
24730 pushl 24(%esp)
24731 pushl 32(%esp)
24732@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24733 /*
24734 * BSS section
24735 */
24736-__PAGE_ALIGNED_BSS
24737- .align PAGE_SIZE
24738 #ifdef CONFIG_X86_PAE
24739+.section .initial_pg_pmd,"a",@progbits
24740 initial_pg_pmd:
24741 .fill 1024*KPMDS,4,0
24742 #else
24743+.section .initial_page_table,"a",@progbits
24744 ENTRY(initial_page_table)
24745 .fill 1024,4,0
24746 #endif
24747+.section .initial_pg_fixmap,"a",@progbits
24748 initial_pg_fixmap:
24749 .fill 1024,4,0
24750+.section .empty_zero_page,"a",@progbits
24751 ENTRY(empty_zero_page)
24752 .fill 4096,1,0
24753+.section .swapper_pg_dir,"a",@progbits
24754 ENTRY(swapper_pg_dir)
24755+#ifdef CONFIG_X86_PAE
24756+ .fill 4,8,0
24757+#else
24758 .fill 1024,4,0
24759+#endif
24760
24761 /*
24762 * This starts the data section.
24763 */
24764 #ifdef CONFIG_X86_PAE
24765-__PAGE_ALIGNED_DATA
24766- /* Page-aligned for the benefit of paravirt? */
24767- .align PAGE_SIZE
24768+.section .initial_page_table,"a",@progbits
24769 ENTRY(initial_page_table)
24770 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24771 # if KPMDS == 3
24772@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24773 # error "Kernel PMDs should be 1, 2 or 3"
24774 # endif
24775 .align PAGE_SIZE /* needs to be page-sized too */
24776+
24777+#ifdef CONFIG_PAX_PER_CPU_PGD
24778+ENTRY(cpu_pgd)
24779+ .rept 2*NR_CPUS
24780+ .fill 4,8,0
24781+ .endr
24782+#endif
24783+
24784 #endif
24785
24786 .data
24787 .balign 4
24788 ENTRY(stack_start)
24789- .long init_thread_union+THREAD_SIZE
24790+ .long init_thread_union+THREAD_SIZE-8
24791
24792 __INITRODATA
24793 int_msg:
24794@@ -727,7 +825,7 @@ fault_msg:
24795 * segment size, and 32-bit linear address value:
24796 */
24797
24798- .data
24799+.section .rodata,"a",@progbits
24800 .globl boot_gdt_descr
24801 .globl idt_descr
24802
24803@@ -736,7 +834,7 @@ fault_msg:
24804 .word 0 # 32 bit align gdt_desc.address
24805 boot_gdt_descr:
24806 .word __BOOT_DS+7
24807- .long boot_gdt - __PAGE_OFFSET
24808+ .long pa(boot_gdt)
24809
24810 .word 0 # 32-bit align idt_desc.address
24811 idt_descr:
24812@@ -747,7 +845,7 @@ idt_descr:
24813 .word 0 # 32 bit align gdt_desc.address
24814 ENTRY(early_gdt_descr)
24815 .word GDT_ENTRIES*8-1
24816- .long gdt_page /* Overwritten for secondary CPUs */
24817+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24818
24819 /*
24820 * The boot_gdt must mirror the equivalent in setup.S and is
24821@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24822 .align L1_CACHE_BYTES
24823 ENTRY(boot_gdt)
24824 .fill GDT_ENTRY_BOOT_CS,8,0
24825- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24826- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24827+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24828+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24829+
24830+ .align PAGE_SIZE_asm
24831+ENTRY(cpu_gdt_table)
24832+ .rept NR_CPUS
24833+ .quad 0x0000000000000000 /* NULL descriptor */
24834+ .quad 0x0000000000000000 /* 0x0b reserved */
24835+ .quad 0x0000000000000000 /* 0x13 reserved */
24836+ .quad 0x0000000000000000 /* 0x1b reserved */
24837+
24838+#ifdef CONFIG_PAX_KERNEXEC
24839+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24840+#else
24841+ .quad 0x0000000000000000 /* 0x20 unused */
24842+#endif
24843+
24844+ .quad 0x0000000000000000 /* 0x28 unused */
24845+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24846+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24847+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24848+ .quad 0x0000000000000000 /* 0x4b reserved */
24849+ .quad 0x0000000000000000 /* 0x53 reserved */
24850+ .quad 0x0000000000000000 /* 0x5b reserved */
24851+
24852+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24853+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24854+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24855+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24856+
24857+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24858+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24859+
24860+ /*
24861+ * Segments used for calling PnP BIOS have byte granularity.
24862+ * The code segments and data segments have fixed 64k limits,
24863+ * the transfer segment sizes are set at run time.
24864+ */
24865+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24866+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24867+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24868+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24869+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24870+
24871+ /*
24872+ * The APM segments have byte granularity and their bases
24873+ * are set at run time. All have 64k limits.
24874+ */
24875+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24876+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24877+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24878+
24879+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24880+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24881+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24882+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24883+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24884+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24885+
24886+ /* Be sure this is zeroed to avoid false validations in Xen */
24887+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24888+ .endr
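The new per-CPU cpu_gdt_table spells every descriptor out as a raw quad, and the boot_gdt entries change from 0x00cf9a.../0x00cf92... to 0x00cf9b.../0x00cf93...: the accessed bit in the access byte is pre-set, presumably so the CPU never has to write it back into a GDT that KERNEXEC keeps read-only. A small self-checking encoder for the standard descriptor layout (nothing patch-specific; field packing per the Intel SDM) confirms the quads used above:

    #include <stdint.h>
    #include <assert.h>

    /* Pack a segment descriptor from base, limit, access and flags. */
    static uint64_t gdt_entry(uint32_t base, uint32_t limit,
                              uint8_t access, uint8_t flags)
    {
            uint64_t d;

            d  = limit & 0xffffULL;                     /* limit 15:0   */
            d |= (uint64_t)(base & 0xffffff) << 16;     /* base 23:0    */
            d |= (uint64_t)access << 40;                /* type/S/DPL/P */
            d |= (uint64_t)((limit >> 16) & 0xf) << 48; /* limit 19:16  */
            d |= (uint64_t)(flags & 0xf) << 52;         /* AVL/L/DB/G   */
            d |= (uint64_t)((base >> 24) & 0xff) << 56; /* base 31:24   */
            return d;
    }

    int main(void)
    {
            /* kernel 4GB code: base 0, limit 0xfffff pages, access 0x9b
             * (present, DPL0, code, readable, accessed), flags 0xc (G|D). */
            assert(gdt_entry(0, 0xfffff, 0x9b, 0xc) == 0x00cf9b000000ffffULL);
            /* kernel 4GB data: access 0x93 (data, writable, accessed) */
            assert(gdt_entry(0, 0xfffff, 0x93, 0xc) == 0x00cf93000000ffffULL);
            return 0;
    }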
24889diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24890index a468c0a..8b5a879 100644
24891--- a/arch/x86/kernel/head_64.S
24892+++ b/arch/x86/kernel/head_64.S
24893@@ -20,6 +20,8 @@
24894 #include <asm/processor-flags.h>
24895 #include <asm/percpu.h>
24896 #include <asm/nops.h>
24897+#include <asm/cpufeature.h>
24898+#include <asm/alternative-asm.h>
24899
24900 #ifdef CONFIG_PARAVIRT
24901 #include <asm/asm-offsets.h>
24902@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24903 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24904 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24905 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24906+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24907+L3_VMALLOC_START = pud_index(VMALLOC_START)
24908+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24909+L3_VMALLOC_END = pud_index(VMALLOC_END)
24910+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24911+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24912
24913 .text
24914 __HEAD
24915@@ -89,11 +97,24 @@ startup_64:
24916 * Fixup the physical addresses in the page table
24917 */
24918 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24919+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24920+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24921+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24922+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24923+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24924
24925- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24926- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24927+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24928+#ifndef CONFIG_XEN
24929+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24930+#endif
24931+
24932+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24933+
24934+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24935+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24936
24937 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24938+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24939
24940 /*
24941 * Set up the identity mapping for the switchover. These
24942@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24943 * after the boot processor executes this code.
24944 */
24945
24946+ orq $-1, %rbp
24947 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24948 1:
24949
24950- /* Enable PAE mode and PGE */
24951- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24952+ /* Enable PAE mode and PSE/PGE */
24953+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24954 movq %rcx, %cr4
24955
24956 /* Setup early boot stage 4 level pagetables. */
24957@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24958 movl $MSR_EFER, %ecx
24959 rdmsr
24960 btsl $_EFER_SCE, %eax /* Enable System Call */
24961- btl $20,%edi /* No Execute supported? */
24962+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24963 jnc 1f
24964 btsl $_EFER_NX, %eax
24965+ cmpq $-1, %rbp
24966+ je 1f
24967 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24968+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24969+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24970+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24971+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24972+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24973+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24974+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24975 1: wrmsr /* Make changes effective */
24976
24977 /* Setup cr0 */
24978@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24979 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24980 * address given in m16:64.
24981 */
24982+ pax_set_fptr_mask
24983 movq initial_code(%rip),%rax
24984 pushq $0 # fake return address to stop unwinder
24985 pushq $__KERNEL_CS # set correct cs
24986@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24987 .quad INIT_PER_CPU_VAR(irq_stack_union)
24988
24989 GLOBAL(stack_start)
24990- .quad init_thread_union+THREAD_SIZE-8
24991+ .quad init_thread_union+THREAD_SIZE-16
24992 .word 0
24993 __FINITDATA
24994
24995@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24996 call dump_stack
24997 #ifdef CONFIG_KALLSYMS
24998 leaq early_idt_ripmsg(%rip),%rdi
24999- movq 40(%rsp),%rsi # %rip again
25000+ movq 88(%rsp),%rsi # %rip again
25001 call __print_symbol
25002 #endif
25003 #endif /* EARLY_PRINTK */
25004@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25005 early_recursion_flag:
25006 .long 0
25007
25008+ .section .rodata,"a",@progbits
25009 #ifdef CONFIG_EARLY_PRINTK
25010 early_idt_msg:
25011 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25012@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25013 NEXT_PAGE(early_dynamic_pgts)
25014 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25015
25016- .data
25017+ .section .rodata,"a",@progbits
25018
25019-#ifndef CONFIG_XEN
25020 NEXT_PAGE(init_level4_pgt)
25021- .fill 512,8,0
25022-#else
25023-NEXT_PAGE(init_level4_pgt)
25024- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25025 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25026 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25027+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25028+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25029+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25030+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25031+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25032+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25033 .org init_level4_pgt + L4_START_KERNEL*8, 0
25034 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25035 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25036
25037+#ifdef CONFIG_PAX_PER_CPU_PGD
25038+NEXT_PAGE(cpu_pgd)
25039+ .rept 2*NR_CPUS
25040+ .fill 512,8,0
25041+ .endr
25042+#endif
25043+
25044 NEXT_PAGE(level3_ident_pgt)
25045 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25046+#ifdef CONFIG_XEN
25047 .fill 511, 8, 0
25048+#else
25049+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25050+ .fill 510,8,0
25051+#endif
25052+
25053+NEXT_PAGE(level3_vmalloc_start_pgt)
25054+ .fill 512,8,0
25055+
25056+NEXT_PAGE(level3_vmalloc_end_pgt)
25057+ .fill 512,8,0
25058+
25059+NEXT_PAGE(level3_vmemmap_pgt)
25060+ .fill L3_VMEMMAP_START,8,0
25061+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25062+
25063 NEXT_PAGE(level2_ident_pgt)
25064- /* Since I easily can, map the first 1G.
25065+ /* Since I easily can, map the first 2G.
25066 * Don't set NX because code runs from these pages.
25067 */
25068- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25069-#endif
25070+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25071
25072 NEXT_PAGE(level3_kernel_pgt)
25073 .fill L3_START_KERNEL,8,0
25074@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25075 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25076 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25077
25078+NEXT_PAGE(level2_vmemmap_pgt)
25079+ .fill 512,8,0
25080+
25081 NEXT_PAGE(level2_kernel_pgt)
25082 /*
25083 * 512 MB kernel mapping. We spend a full page on this pagetable
25084@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25085 NEXT_PAGE(level2_fixmap_pgt)
25086 .fill 506,8,0
25087 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25088- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25089- .fill 5,8,0
25090+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25091+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25092+ .fill 4,8,0
25093
25094 NEXT_PAGE(level1_fixmap_pgt)
25095 .fill 512,8,0
25096
25097+NEXT_PAGE(level1_vsyscall_pgt)
25098+ .fill 512,8,0
25099+
25100 #undef PMDS
25101
25102- .data
25103+ .align PAGE_SIZE
25104+ENTRY(cpu_gdt_table)
25105+ .rept NR_CPUS
25106+ .quad 0x0000000000000000 /* NULL descriptor */
25107+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25108+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25109+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25110+ .quad 0x00cffb000000ffff /* __USER32_CS */
25111+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25112+ .quad 0x00affb000000ffff /* __USER_CS */
25113+
25114+#ifdef CONFIG_PAX_KERNEXEC
25115+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25116+#else
25117+ .quad 0x0 /* unused */
25118+#endif
25119+
25120+ .quad 0,0 /* TSS */
25121+ .quad 0,0 /* LDT */
25122+ .quad 0,0,0 /* three TLS descriptors */
25123+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25124+ /* asm/segment.h:GDT_ENTRIES must match this */
25125+
25126+#ifdef CONFIG_PAX_MEMORY_UDEREF
25127+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25128+#else
25129+ .quad 0x0 /* unused */
25130+#endif
25131+
25132+ /* zero the remaining page */
25133+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25134+ .endr
25135+
25136 .align 16
25137 .globl early_gdt_descr
25138 early_gdt_descr:
25139 .word GDT_ENTRIES*8-1
25140 early_gdt_descr_base:
25141- .quad INIT_PER_CPU_VAR(gdt_page)
25142+ .quad cpu_gdt_table
25143
25144 ENTRY(phys_base)
25145 /* This must match the first entry in level2_kernel_pgt */
25146 .quad 0x0000000000000000
25147
25148 #include "../../x86/xen/xen-head.S"
25149-
25150- __PAGE_ALIGNED_BSS
25151+
25152+ .section .rodata,"a",@progbits
25153 NEXT_PAGE(empty_zero_page)
25154 .skip PAGE_SIZE
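The new L4_*/L3_* symbols in the head_64.S diff are just pgd_index()/pud_index() of the region bases, so init_level4_pgt can be laid out statically with .org directives; each level-4 slot covers 2^39 bytes. A quick self-check against the 3.19-era virtual memory layout (PAGE_OFFSET and VMEMMAP_START are that kernel's defaults, assumed here):

    #include <stdint.h>
    #include <assert.h>

    #define PGDIR_SHIFT   39
    #define PTRS_PER_PGD  512
    #define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

    int main(void)
    {
            uint64_t start_kernel_map = 0xffffffff80000000ULL;
            uint64_t page_offset      = 0xffff880000000000ULL;
            uint64_t vmemmap_start    = 0xffffea0000000000ULL;

            assert(pgd_index(start_kernel_map) == 511); /* L4_START_KERNEL  */
            assert(pgd_index(page_offset)      == 272); /* L4_PAGE_OFFSET   */
            assert(pgd_index(vmemmap_start)    == 468); /* L4_VMEMMAP_START */
            return 0;
    }

The 511 result matches the "(2^48-(2*1024*1024*1024))/(2^39) = 511" comment the hunk keeps in place.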
25155diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25156index 05fd74f..c3548b1 100644
25157--- a/arch/x86/kernel/i386_ksyms_32.c
25158+++ b/arch/x86/kernel/i386_ksyms_32.c
25159@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25160 EXPORT_SYMBOL(cmpxchg8b_emu);
25161 #endif
25162
25163+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25164+
25165 /* Networking helper routines. */
25166 EXPORT_SYMBOL(csum_partial_copy_generic);
25167+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25168+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25169
25170 EXPORT_SYMBOL(__get_user_1);
25171 EXPORT_SYMBOL(__get_user_2);
25172@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25173 EXPORT_SYMBOL(___preempt_schedule_context);
25174 #endif
25175 #endif
25176+
25177+#ifdef CONFIG_PAX_KERNEXEC
25178+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25179+#endif
25180+
25181+#ifdef CONFIG_PAX_PER_CPU_PGD
25182+EXPORT_SYMBOL(cpu_pgd);
25183+#endif
25184diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25185index a9a4229..6f4d476 100644
25186--- a/arch/x86/kernel/i387.c
25187+++ b/arch/x86/kernel/i387.c
25188@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25189 static inline bool interrupted_user_mode(void)
25190 {
25191 struct pt_regs *regs = get_irq_regs();
25192- return regs && user_mode_vm(regs);
25193+ return regs && user_mode(regs);
25194 }
25195
25196 /*
25197diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25198index e7cc537..67d7372 100644
25199--- a/arch/x86/kernel/i8259.c
25200+++ b/arch/x86/kernel/i8259.c
25201@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25202 static void make_8259A_irq(unsigned int irq)
25203 {
25204 disable_irq_nosync(irq);
25205- io_apic_irqs &= ~(1<<irq);
25206+ io_apic_irqs &= ~(1UL<<irq);
25207 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25208 enable_irq(irq);
25209 }
25210@@ -208,7 +208,7 @@ spurious_8259A_irq:
25211 "spurious 8259A interrupt: IRQ%d.\n", irq);
25212 spurious_irq_mask |= irqmask;
25213 }
25214- atomic_inc(&irq_err_count);
25215+ atomic_inc_unchecked(&irq_err_count);
25216 /*
25217 * Theoretically we do not have to handle this IRQ,
25218 * but in Linux this does not cause problems and is
25219@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25220 /* (slave's support for AEOI in flat mode is to be investigated) */
25221 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25222
25223+ pax_open_kernel();
25224 if (auto_eoi)
25225 /*
25226 * In AEOI mode we just have to mask the interrupt
25227 * when acking.
25228 */
25229- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25230+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25231 else
25232- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25233+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25234+ pax_close_kernel();
25235
25236 udelay(100); /* wait for 8259A to initialize */
25237
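
The pax_open_kernel()/pax_close_kernel() bracket above is needed because i8259A_chip is const-ified under PaX and lives in write-protected memory; the *(void **)& cast defeats the const qualifier for the one sanctioned store. A rough userspace analogy (the kernel mechanism actually toggles CR0.WP or the page-table entry, not mprotect):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* stand-in for a const ops structure in a read-only section */
	void (**ops)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	mprotect(ops, pagesz, PROT_READ);		/* "const" from here on */

	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	ops[0] = (void (*)(void))abort;			/* the sanctioned write */
	mprotect(ops, pagesz, PROT_READ);		/* pax_close_kernel() */

	printf("patched ops[0] = %p\n", (void *)ops[0]);
	return 0;
}
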
25238diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25239index a979b5b..1d6db75 100644
25240--- a/arch/x86/kernel/io_delay.c
25241+++ b/arch/x86/kernel/io_delay.c
25242@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25243 * Quirk table for systems that misbehave (lock up, etc.) if port
25244 * 0x80 is used:
25245 */
25246-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25247+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25248 {
25249 .callback = dmi_io_delay_0xed_port,
25250 .ident = "Compaq Presario V6000",
25251diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25252index 4ddaf66..49d5c18 100644
25253--- a/arch/x86/kernel/ioport.c
25254+++ b/arch/x86/kernel/ioport.c
25255@@ -6,6 +6,7 @@
25256 #include <linux/sched.h>
25257 #include <linux/kernel.h>
25258 #include <linux/capability.h>
25259+#include <linux/security.h>
25260 #include <linux/errno.h>
25261 #include <linux/types.h>
25262 #include <linux/ioport.h>
25263@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25264 return -EINVAL;
25265 if (turn_on && !capable(CAP_SYS_RAWIO))
25266 return -EPERM;
25267+#ifdef CONFIG_GRKERNSEC_IO
25268+ if (turn_on && grsec_disable_privio) {
25269+ gr_handle_ioperm();
25270+ return -ENODEV;
25271+ }
25272+#endif
25273
25274 /*
25275 * If it's the first ioperm() call in this thread's lifetime, set the
25276@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25277 * because the ->io_bitmap_max value must match the bitmap
25278 * contents:
25279 */
25280- tss = &per_cpu(init_tss, get_cpu());
25281+ tss = init_tss + get_cpu();
25282
25283 if (turn_on)
25284 bitmap_clear(t->io_bitmap_ptr, from, num);
25285@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25286 if (level > old) {
25287 if (!capable(CAP_SYS_RAWIO))
25288 return -EPERM;
25289+#ifdef CONFIG_GRKERNSEC_IO
25290+ if (grsec_disable_privio) {
25291+ gr_handle_iopl();
25292+ return -ENODEV;
25293+ }
25294+#endif
25295 }
25296 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25297 t->iopl = level << 12;
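
Note the ordering in both the ioperm and iopl hunks: capable(CAP_SYS_RAWIO) is tested first, so unprivileged callers keep getting the usual -EPERM, and only privileged callers blocked by the grsec_disable_privio toggle see -ENODEV plus a log entry from gr_handle_ioperm()/gr_handle_iopl(). A condensed sketch of that gate (the helpers are stubs for illustration):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool capable_sys_rawio;		/* stands in for capable(CAP_SYS_RAWIO) */
static bool grsec_disable_privio;	/* the GRKERNSEC_IO lockdown toggle */
static void gr_handle_ioperm(void) { }	/* logging stub */

static long ioperm_gate(bool turn_on)
{
	if (turn_on && !capable_sys_rawio)
		return -EPERM;		/* unchanged for unprivileged users */
	if (turn_on && grsec_disable_privio) {
		gr_handle_ioperm();	/* privileged but locked down */
		return -ENODEV;
	}
	return 0;			/* fall through to the bitmap update */
}

int main(void)
{
	capable_sys_rawio = grsec_disable_privio = true;
	printf("gate says %ld\n", ioperm_gate(true));	/* -19 (ENODEV) */
	return 0;
}
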
25298diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25299index 705ef8d..8672c9d 100644
25300--- a/arch/x86/kernel/irq.c
25301+++ b/arch/x86/kernel/irq.c
25302@@ -22,7 +22,7 @@
25303 #define CREATE_TRACE_POINTS
25304 #include <asm/trace/irq_vectors.h>
25305
25306-atomic_t irq_err_count;
25307+atomic_unchecked_t irq_err_count;
25308
25309 /* Function pointer for generic interrupt vector handling */
25310 void (*x86_platform_ipi_callback)(void) = NULL;
25311@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25312 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25313 seq_puts(p, " Hypervisor callback interrupts\n");
25314 #endif
25315- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25316+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25317 #if defined(CONFIG_X86_IO_APIC)
25318- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25319+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25320 #endif
25321 return 0;
25322 }
25323@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25324
25325 u64 arch_irq_stat(void)
25326 {
25327- u64 sum = atomic_read(&irq_err_count);
25328+ u64 sum = atomic_read_unchecked(&irq_err_count);
25329 return sum;
25330 }
25331
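
irq_err_count exists only for /proc/interrupts statistics, so it is switched to atomic_unchecked_t: under PaX REFCOUNT the plain atomic_t operations trap on signed overflow to stop refcount-overflow exploits, and counters that may legitimately wrap must opt out through the _unchecked variants. The distinction, sketched with compiler builtins (the real kernel versions are inline asm):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* like PaX atomic_add(): overflow is detected and the task is killed */
static int atomic_add_checked(int *v, int i)
{
	int r;
	if (__builtin_add_overflow(*v, i, &r))
		abort();		/* PaX would log and SIGKILL instead */
	return *v = r;
}

/* like atomic_add_unchecked(): a free-running statistic, wrap is fine */
static unsigned int atomic_add_unchecked(unsigned int *v, unsigned int i)
{
	return *v += i;		/* unsigned wraparound is well defined */
}

int main(void)
{
	unsigned int err_count = UINT_MAX;
	int refcount = INT_MAX;

	printf("err_count wraps to %u\n", atomic_add_unchecked(&err_count, 1));
	atomic_add_checked(&refcount, 1);	/* demonstrates the trap: aborts */
	return 0;
}
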
25332diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25333index 63ce838..2ea3e06 100644
25334--- a/arch/x86/kernel/irq_32.c
25335+++ b/arch/x86/kernel/irq_32.c
25336@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25337
25338 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25339
25340+extern void gr_handle_kernel_exploit(void);
25341+
25342 int sysctl_panic_on_stackoverflow __read_mostly;
25343
25344 /* Debugging check for stack overflow: is there less than 1KB free? */
25345@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25346 __asm__ __volatile__("andl %%esp,%0" :
25347 "=r" (sp) : "0" (THREAD_SIZE - 1));
25348
25349- return sp < (sizeof(struct thread_info) + STACK_WARN);
25350+ return sp < STACK_WARN;
25351 }
25352
25353 static void print_stack_overflow(void)
25354 {
25355 printk(KERN_WARNING "low stack detected by irq handler\n");
25356 dump_stack();
25357+ gr_handle_kernel_exploit();
25358 if (sysctl_panic_on_stackoverflow)
25359 panic("low stack detected by irq handler - check messages\n");
25360 }
25361@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25362 static inline int
25363 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25364 {
25365- struct irq_stack *curstk, *irqstk;
25366+ struct irq_stack *irqstk;
25367 u32 *isp, *prev_esp, arg1, arg2;
25368
25369- curstk = (struct irq_stack *) current_stack();
25370 irqstk = __this_cpu_read(hardirq_stack);
25371
25372 /*
25373@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25374 * handler) we can't do that and just have to keep using the
25375 * current stack (which is the irq stack already after all)
25376 */
25377- if (unlikely(curstk == irqstk))
25378+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25379 return 0;
25380
25381- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25382+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25383
25384 /* Save the next esp at the bottom of the stack */
25385 prev_esp = (u32 *)irqstk;
25386 *prev_esp = current_stack_pointer;
25387
25388+#ifdef CONFIG_PAX_MEMORY_UDEREF
25389+ __set_fs(MAKE_MM_SEG(0));
25390+#endif
25391+
25392 if (unlikely(overflow))
25393 call_on_stack(print_stack_overflow, isp);
25394
25395@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25396 : "0" (irq), "1" (desc), "2" (isp),
25397 "D" (desc->handle_irq)
25398 : "memory", "cc", "ecx");
25399+
25400+#ifdef CONFIG_PAX_MEMORY_UDEREF
25401+ __set_fs(current_thread_info()->addr_limit);
25402+#endif
25403+
25404 return 1;
25405 }
25406
25407@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25408 */
25409 void irq_ctx_init(int cpu)
25410 {
25411- struct irq_stack *irqstk;
25412-
25413 if (per_cpu(hardirq_stack, cpu))
25414 return;
25415
25416- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25417- THREADINFO_GFP,
25418- THREAD_SIZE_ORDER));
25419- per_cpu(hardirq_stack, cpu) = irqstk;
25420-
25421- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25422- THREADINFO_GFP,
25423- THREAD_SIZE_ORDER));
25424- per_cpu(softirq_stack, cpu) = irqstk;
25425-
25426- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25427- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25428+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25429+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25430 }
25431
25432 void do_softirq_own_stack(void)
25433 {
25434- struct thread_info *curstk;
25435 struct irq_stack *irqstk;
25436 u32 *isp, *prev_esp;
25437
25438- curstk = current_stack();
25439 irqstk = __this_cpu_read(softirq_stack);
25440
25441 /* build the stack frame on the softirq stack */
25442@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25443 prev_esp = (u32 *)irqstk;
25444 *prev_esp = current_stack_pointer;
25445
25446+#ifdef CONFIG_PAX_MEMORY_UDEREF
25447+ __set_fs(MAKE_MM_SEG(0));
25448+#endif
25449+
25450 call_on_stack(__do_softirq, isp);
25451+
25452+#ifdef CONFIG_PAX_MEMORY_UDEREF
25453+ __set_fs(current_thread_info()->addr_limit);
25454+#endif
25455+
25456 }
25457
25458 bool handle_irq(unsigned irq, struct pt_regs *regs)
25459@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25460 if (unlikely(!desc))
25461 return false;
25462
25463- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25464+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25465 if (unlikely(overflow))
25466 print_stack_overflow();
25467 desc->handle_irq(irq, desc);
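
The UDEREF additions above clamp the thread's address limit to a zero-length segment (__set_fs(MAKE_MM_SEG(0))) while running on the hard- or soft-IRQ stack, so a stray userland access from interrupt context faults instead of silently reading user memory, and restore current_thread_info()->addr_limit on the way out. The shape is a save/clamp/restore bracket; a conceptual sketch (names hypothetical, the kernel keeps this state per thread):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t addr_limit = 0xC0000000u;	/* e.g. the 3G/1G split */

/* access_ok(): a user range must fit below the current limit */
static int range_ok(uintptr_t p, size_t n)
{
	return n <= addr_limit && p <= addr_limit - n;
}

static void irq_handler(void)
{
	/* no userland window may be open in IRQ context */
	assert(!range_ok(0x08048000u, 4));
}

int main(void)
{
	uintptr_t saved = addr_limit;

	addr_limit = 0;		/* __set_fs(MAKE_MM_SEG(0)) */
	irq_handler();
	addr_limit = saved;	/* __set_fs(...->addr_limit) */

	printf("limit restored to %#lx\n", (unsigned long)addr_limit);
	return 0;
}
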
25468diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25469index e4b503d..824fce8 100644
25470--- a/arch/x86/kernel/irq_64.c
25471+++ b/arch/x86/kernel/irq_64.c
25472@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25473 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25474 EXPORT_PER_CPU_SYMBOL(irq_regs);
25475
25476+extern void gr_handle_kernel_exploit(void);
25477+
25478 int sysctl_panic_on_stackoverflow;
25479
25480 /*
25481@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25482 u64 estack_top, estack_bottom;
25483 u64 curbase = (u64)task_stack_page(current);
25484
25485- if (user_mode_vm(regs))
25486+ if (user_mode(regs))
25487 return;
25488
25489 if (regs->sp >= curbase + sizeof(struct thread_info) +
25490@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25491 irq_stack_top, irq_stack_bottom,
25492 estack_top, estack_bottom);
25493
25494+ gr_handle_kernel_exploit();
25495+
25496 if (sysctl_panic_on_stackoverflow)
25497 panic("low stack detected by irq handler - check messages\n");
25498 #endif
25499diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25500index 26d5a55..a01160a 100644
25501--- a/arch/x86/kernel/jump_label.c
25502+++ b/arch/x86/kernel/jump_label.c
25503@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25504 * Jump label is enabled for the first time.
25505 * So we expect a default_nop...
25506 */
25507- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25508+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25509 != 0))
25510 bug_at((void *)entry->code, __LINE__);
25511 } else {
25512@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25513 * ...otherwise expect an ideal_nop. Otherwise
25514 * something went horribly wrong.
25515 */
25516- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25517+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25518 != 0))
25519 bug_at((void *)entry->code, __LINE__);
25520 }
25521@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25522 * are converting the default nop to the ideal nop.
25523 */
25524 if (init) {
25525- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25526+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25527 bug_at((void *)entry->code, __LINE__);
25528 } else {
25529 code.jump = 0xe9;
25530 code.offset = entry->target -
25531 (entry->code + JUMP_LABEL_NOP_SIZE);
25532- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25533+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25534 bug_at((void *)entry->code, __LINE__);
25535 }
25536 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
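
ktla_ktva()/ktva_ktla() recur through this section (jump labels here, kgdb and kprobes below) wherever kernel text is compared or patched. Under KERNEXEC on i386 the kernel text is reachable at two addresses, the one it executes from and an alias, and the macro pair converts between them; configurations without the split define both as identity. As offset arithmetic (the constant below is purely illustrative; the patch derives the real offset from LOAD_PHYSICAL_ADDR and PAGE_OFFSET):

#include <stdio.h>

#define KTEXT_OFFSET 0xC1000000UL	/* illustrative value only */

#define ktla_ktva(addr)	((addr) + KTEXT_OFFSET)	/* exec address -> alias */
#define ktva_ktla(addr)	((addr) - KTEXT_OFFSET)	/* alias -> exec address */

int main(void)
{
	unsigned long code = 0x00400000UL;	/* hypothetical jump_entry->code */
	unsigned long alias = ktla_ktva(code);

	/* the memcmp()/memcpy() calls above operate on the alias... */
	printf("inspect %#lx via %#lx\n", code, alias);
	/* ...and the translation round-trips exactly */
	return ktva_ktla(alias) == code ? 0 : 1;
}
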
25537diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25538index 7ec1d5f..5a7d130 100644
25539--- a/arch/x86/kernel/kgdb.c
25540+++ b/arch/x86/kernel/kgdb.c
25541@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25542 #ifdef CONFIG_X86_32
25543 switch (regno) {
25544 case GDB_SS:
25545- if (!user_mode_vm(regs))
25546+ if (!user_mode(regs))
25547 *(unsigned long *)mem = __KERNEL_DS;
25548 break;
25549 case GDB_SP:
25550- if (!user_mode_vm(regs))
25551+ if (!user_mode(regs))
25552 *(unsigned long *)mem = kernel_stack_pointer(regs);
25553 break;
25554 case GDB_GS:
25555@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25556 bp->attr.bp_addr = breakinfo[breakno].addr;
25557 bp->attr.bp_len = breakinfo[breakno].len;
25558 bp->attr.bp_type = breakinfo[breakno].type;
25559- info->address = breakinfo[breakno].addr;
25560+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25561+ info->address = ktla_ktva(breakinfo[breakno].addr);
25562+ else
25563+ info->address = breakinfo[breakno].addr;
25564 info->len = breakinfo[breakno].len;
25565 info->type = breakinfo[breakno].type;
25566 val = arch_install_hw_breakpoint(bp);
25567@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25568 case 'k':
25569 /* clear the trace bit */
25570 linux_regs->flags &= ~X86_EFLAGS_TF;
25571- atomic_set(&kgdb_cpu_doing_single_step, -1);
25572+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25573
25574 /* set the trace bit if we're stepping */
25575 if (remcomInBuffer[0] == 's') {
25576 linux_regs->flags |= X86_EFLAGS_TF;
25577- atomic_set(&kgdb_cpu_doing_single_step,
25578+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25579 raw_smp_processor_id());
25580 }
25581
25582@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25583
25584 switch (cmd) {
25585 case DIE_DEBUG:
25586- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25587+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25588 if (user_mode(regs))
25589 return single_step_cont(regs, args);
25590 break;
25591@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25592 #endif /* CONFIG_DEBUG_RODATA */
25593
25594 bpt->type = BP_BREAKPOINT;
25595- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25596+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25597 BREAK_INSTR_SIZE);
25598 if (err)
25599 return err;
25600- err = probe_kernel_write((char *)bpt->bpt_addr,
25601+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25602 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25603 #ifdef CONFIG_DEBUG_RODATA
25604 if (!err)
25605@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25606 return -EBUSY;
25607 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25608 BREAK_INSTR_SIZE);
25609- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25610+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25611 if (err)
25612 return err;
25613 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25614@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25615 if (mutex_is_locked(&text_mutex))
25616 goto knl_write;
25617 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25618- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25619+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25620 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25621 goto knl_write;
25622 return err;
25623 knl_write:
25624 #endif /* CONFIG_DEBUG_RODATA */
25625- return probe_kernel_write((char *)bpt->bpt_addr,
25626+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25627 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25628 }
25629
25630diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25631index 98f654d..ac04352 100644
25632--- a/arch/x86/kernel/kprobes/core.c
25633+++ b/arch/x86/kernel/kprobes/core.c
25634@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25635 s32 raddr;
25636 } __packed *insn;
25637
25638- insn = (struct __arch_relative_insn *)from;
25639+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25640+
25641+ pax_open_kernel();
25642 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25643 insn->op = op;
25644+ pax_close_kernel();
25645 }
25646
25647 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25648@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25649 kprobe_opcode_t opcode;
25650 kprobe_opcode_t *orig_opcodes = opcodes;
25651
25652- if (search_exception_tables((unsigned long)opcodes))
25653+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25654 return 0; /* Page fault may occur on this address. */
25655
25656 retry:
25657@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25658 * for the first byte, we can recover the original instruction
25659 * from it and kp->opcode.
25660 */
25661- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25662+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25663 buf[0] = kp->opcode;
25664- return (unsigned long)buf;
25665+ return ktva_ktla((unsigned long)buf);
25666 }
25667
25668 /*
25669@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25670 /* Another subsystem puts a breakpoint, failed to recover */
25671 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25672 return 0;
25673+ pax_open_kernel();
25674 memcpy(dest, insn.kaddr, insn.length);
25675+ pax_close_kernel();
25676
25677 #ifdef CONFIG_X86_64
25678 if (insn_rip_relative(&insn)) {
25679@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25680 return 0;
25681 }
25682 disp = (u8 *) dest + insn_offset_displacement(&insn);
25683+ pax_open_kernel();
25684 *(s32 *) disp = (s32) newdisp;
25685+ pax_close_kernel();
25686 }
25687 #endif
25688 return insn.length;
25689@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25690 * nor set current_kprobe, because it doesn't use single
25691 * stepping.
25692 */
25693- regs->ip = (unsigned long)p->ainsn.insn;
25694+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25695 preempt_enable_no_resched();
25696 return;
25697 }
25698@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25699 regs->flags &= ~X86_EFLAGS_IF;
25700 /* single step inline if the instruction is an int3 */
25701 if (p->opcode == BREAKPOINT_INSTRUCTION)
25702- regs->ip = (unsigned long)p->addr;
25703+ regs->ip = ktla_ktva((unsigned long)p->addr);
25704 else
25705- regs->ip = (unsigned long)p->ainsn.insn;
25706+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25707 }
25708 NOKPROBE_SYMBOL(setup_singlestep);
25709
25710@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25711 struct kprobe *p;
25712 struct kprobe_ctlblk *kcb;
25713
25714- if (user_mode_vm(regs))
25715+ if (user_mode(regs))
25716 return 0;
25717
25718 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25719@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25720 setup_singlestep(p, regs, kcb, 0);
25721 return 1;
25722 }
25723- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25724+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25725 /*
25726 * The breakpoint instruction was removed right
25727 * after we hit it. Another cpu has removed
25728@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25729 " movq %rax, 152(%rsp)\n"
25730 RESTORE_REGS_STRING
25731 " popfq\n"
25732+#ifdef KERNEXEC_PLUGIN
25733+ " btsq $63,(%rsp)\n"
25734+#endif
25735 #else
25736 " pushf\n"
25737 SAVE_REGS_STRING
25738@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25739 struct kprobe_ctlblk *kcb)
25740 {
25741 unsigned long *tos = stack_addr(regs);
25742- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25743+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25744 unsigned long orig_ip = (unsigned long)p->addr;
25745 kprobe_opcode_t *insn = p->ainsn.insn;
25746
25747@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25748 struct die_args *args = data;
25749 int ret = NOTIFY_DONE;
25750
25751- if (args->regs && user_mode_vm(args->regs))
25752+ if (args->regs && user_mode(args->regs))
25753 return ret;
25754
25755 if (val == DIE_GPF) {
25756diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25757index 7c523bb..01b051b 100644
25758--- a/arch/x86/kernel/kprobes/opt.c
25759+++ b/arch/x86/kernel/kprobes/opt.c
25760@@ -79,6 +79,7 @@ found:
25761 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25762 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25763 {
25764+ pax_open_kernel();
25765 #ifdef CONFIG_X86_64
25766 *addr++ = 0x48;
25767 *addr++ = 0xbf;
25768@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25769 *addr++ = 0xb8;
25770 #endif
25771 *(unsigned long *)addr = val;
25772+ pax_close_kernel();
25773 }
25774
25775 asm (
25776@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25777 * Verify if the address gap is in 2GB range, because this uses
25778 * a relative jump.
25779 */
25780- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25781+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25782 if (abs(rel) > 0x7fffffff) {
25783 __arch_remove_optimized_kprobe(op, 0);
25784 return -ERANGE;
25785@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25786 op->optinsn.size = ret;
25787
25788 /* Copy arch-dep-instance from template */
25789- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25790+ pax_open_kernel();
25791+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25792+ pax_close_kernel();
25793
25794 /* Set probe information */
25795 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25796
25797 /* Set probe function call */
25798- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25799+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25800
25801 /* Set returning jmp instruction at the tail of out-of-line buffer */
25802- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25803+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25804 (u8 *)op->kp.addr + op->optinsn.size);
25805
25806 flush_icache_range((unsigned long) buf,
25807@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25808 WARN_ON(kprobe_disabled(&op->kp));
25809
25810 /* Backup instructions which will be replaced by jump address */
25811- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25812+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25813 RELATIVE_ADDR_SIZE);
25814
25815 insn_buf[0] = RELATIVEJUMP_OPCODE;
25816@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25817 /* This kprobe is really able to run optimized path. */
25818 op = container_of(p, struct optimized_kprobe, kp);
25819 /* Detour through copied instructions */
25820- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25821+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25822 if (!reenter)
25823 reset_current_kprobe();
25824 preempt_enable_no_resched();
25825diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25826index c2bedae..25e7ab60 100644
25827--- a/arch/x86/kernel/ksysfs.c
25828+++ b/arch/x86/kernel/ksysfs.c
25829@@ -184,7 +184,7 @@ out:
25830
25831 static struct kobj_attribute type_attr = __ATTR_RO(type);
25832
25833-static struct bin_attribute data_attr = {
25834+static bin_attribute_no_const data_attr __read_only = {
25835 .attr = {
25836 .name = "data",
25837 .mode = S_IRUGO,
25838diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25839index c37886d..d851d32 100644
25840--- a/arch/x86/kernel/ldt.c
25841+++ b/arch/x86/kernel/ldt.c
25842@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25843 if (reload) {
25844 #ifdef CONFIG_SMP
25845 preempt_disable();
25846- load_LDT(pc);
25847+ load_LDT_nolock(pc);
25848 if (!cpumask_equal(mm_cpumask(current->mm),
25849 cpumask_of(smp_processor_id())))
25850 smp_call_function(flush_ldt, current->mm, 1);
25851 preempt_enable();
25852 #else
25853- load_LDT(pc);
25854+ load_LDT_nolock(pc);
25855 #endif
25856 }
25857 if (oldsize) {
25858@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25859 return err;
25860
25861 for (i = 0; i < old->size; i++)
25862- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25863+ write_ldt_entry(new->ldt, i, old->ldt + i);
25864 return 0;
25865 }
25866
25867@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25868 retval = copy_ldt(&mm->context, &old_mm->context);
25869 mutex_unlock(&old_mm->context.lock);
25870 }
25871+
25872+ if (tsk == current) {
25873+ mm->context.vdso = 0;
25874+
25875+#ifdef CONFIG_X86_32
25876+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25877+ mm->context.user_cs_base = 0UL;
25878+ mm->context.user_cs_limit = ~0UL;
25879+
25880+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25881+ cpus_clear(mm->context.cpu_user_cs_mask);
25882+#endif
25883+
25884+#endif
25885+#endif
25886+
25887+ }
25888+
25889 return retval;
25890 }
25891
25892@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25893 }
25894 }
25895
25896+#ifdef CONFIG_PAX_SEGMEXEC
25897+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25898+ error = -EINVAL;
25899+ goto out_unlock;
25900+ }
25901+#endif
25902+
25903 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25904 error = -EINVAL;
25905 goto out_unlock;
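
The SEGMEXEC check added to write_ldt() refuses user-supplied code descriptors, since an attacker-chosen code segment in the LDT would sidestep the segmentation-based NX emulation. From userland the refusal surfaces as modify_ldt() failing with EINVAL; an x86/Linux-specific sketch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number   = 0;
	d.base_addr      = 0x1000;
	d.limit          = 0xfffff;
	d.seg_32bit      = 1;
	d.limit_in_pages = 1;
	d.contents       = MODIFY_LDT_CONTENTS_CODE;	/* the rejected case */

	/* func 0x11 = new-mode write */
	if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) < 0)
		perror("modify_ldt");	/* EINVAL for PAX_SEGMEXEC tasks */
	return 0;
}
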
25906diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25907index 469b23d..5449cfe 100644
25908--- a/arch/x86/kernel/machine_kexec_32.c
25909+++ b/arch/x86/kernel/machine_kexec_32.c
25910@@ -26,7 +26,7 @@
25911 #include <asm/cacheflush.h>
25912 #include <asm/debugreg.h>
25913
25914-static void set_idt(void *newidt, __u16 limit)
25915+static void set_idt(struct desc_struct *newidt, __u16 limit)
25916 {
25917 struct desc_ptr curidt;
25918
25919@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25920 }
25921
25922
25923-static void set_gdt(void *newgdt, __u16 limit)
25924+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25925 {
25926 struct desc_ptr curgdt;
25927
25928@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25929 }
25930
25931 control_page = page_address(image->control_code_page);
25932- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25933+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25934
25935 relocate_kernel_ptr = control_page;
25936 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25937diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25938index 94ea120..4154cea 100644
25939--- a/arch/x86/kernel/mcount_64.S
25940+++ b/arch/x86/kernel/mcount_64.S
25941@@ -7,7 +7,7 @@
25942 #include <linux/linkage.h>
25943 #include <asm/ptrace.h>
25944 #include <asm/ftrace.h>
25945-
25946+#include <asm/alternative-asm.h>
25947
25948 .code64
25949 .section .entry.text, "ax"
25950@@ -148,8 +148,9 @@
25951 #ifdef CONFIG_DYNAMIC_FTRACE
25952
25953 ENTRY(function_hook)
25954+ pax_force_retaddr
25955 retq
25956-END(function_hook)
25957+ENDPROC(function_hook)
25958
25959 ENTRY(ftrace_caller)
25960 /* save_mcount_regs fills in first two parameters */
25961@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25962 #endif
25963
25964 GLOBAL(ftrace_stub)
25965+ pax_force_retaddr
25966 retq
25967-END(ftrace_caller)
25968+ENDPROC(ftrace_caller)
25969
25970 ENTRY(ftrace_regs_caller)
25971 /* Save the current flags before any operations that can change them */
25972@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25973
25974 jmp ftrace_return
25975
25976-END(ftrace_regs_caller)
25977+ENDPROC(ftrace_regs_caller)
25978
25979
25980 #else /* ! CONFIG_DYNAMIC_FTRACE */
25981@@ -272,18 +274,20 @@ fgraph_trace:
25982 #endif
25983
25984 GLOBAL(ftrace_stub)
25985+ pax_force_retaddr
25986 retq
25987
25988 trace:
25989 /* save_mcount_regs fills in first two parameters */
25990 save_mcount_regs
25991
25992+ pax_force_fptr ftrace_trace_function
25993 call *ftrace_trace_function
25994
25995 restore_mcount_regs
25996
25997 jmp fgraph_trace
25998-END(function_hook)
25999+ENDPROC(function_hook)
26000 #endif /* CONFIG_DYNAMIC_FTRACE */
26001 #endif /* CONFIG_FUNCTION_TRACER */
26002
26003@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
26004
26005 restore_mcount_regs
26006
26007+ pax_force_retaddr
26008 retq
26009-END(ftrace_graph_caller)
26010+ENDPROC(ftrace_graph_caller)
26011
26012 GLOBAL(return_to_handler)
26013 subq $24, %rsp
26014@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
26015 movq 8(%rsp), %rdx
26016 movq (%rsp), %rax
26017 addq $24, %rsp
26018+ pax_force_fptr %rdi
26019 jmp *%rdi
26020+ENDPROC(return_to_handler)
26021 #endif
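
pax_force_retaddr and pax_force_fptr belong to the amd64 KERNEXEC plugin: before every return or indirect jump the top bit of the target is set (the btsq $63,(%rsp) in the kprobes trampoline above is the same operation), so a corrupted return address or function pointer can only point into the kernel half of the canonical space, never at userland shellcode. The arithmetic is a single OR with bit 63:

#include <stdio.h>
#include <stdint.h>

/* btsq $63,(%rsp): clamp a code address into the kernel half */
static uint64_t pax_force_retaddr(uint64_t addr)
{
	return addr | (1ULL << 63);
}

int main(void)
{
	uint64_t kernel_ret = 0xffffffff81000000ULL;	/* genuine kernel text */
	uint64_t forged_ret = 0x0000000000400000ULL;	/* userland target */

	/* legitimate addresses already have bit 63 set and pass unchanged */
	printf("%016llx -> %016llx\n", (unsigned long long)kernel_ret,
	       (unsigned long long)pax_force_retaddr(kernel_ret));
	/* a forged userland target becomes non-canonical: a clean fault,
	 * not attacker-controlled execution in ring 0 */
	printf("%016llx -> %016llx\n", (unsigned long long)forged_ret,
	       (unsigned long long)pax_force_retaddr(forged_ret));
	return 0;
}
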
26022diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26023index e69f988..72902b7 100644
26024--- a/arch/x86/kernel/module.c
26025+++ b/arch/x86/kernel/module.c
26026@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26027 }
26028 #endif
26029
26030-void *module_alloc(unsigned long size)
26031+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26032 {
26033- if (PAGE_ALIGN(size) > MODULES_LEN)
26034+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26035 return NULL;
26036 return __vmalloc_node_range(size, 1,
26037 MODULES_VADDR + get_module_load_offset(),
26038- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26039- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26040+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26041+ prot, NUMA_NO_NODE,
26042 __builtin_return_address(0));
26043 }
26044
26045+void *module_alloc(unsigned long size)
26046+{
26047+
26048+#ifdef CONFIG_PAX_KERNEXEC
26049+ return __module_alloc(size, PAGE_KERNEL);
26050+#else
26051+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26052+#endif
26053+
26054+}
26055+
26056+#ifdef CONFIG_PAX_KERNEXEC
26057+#ifdef CONFIG_X86_32
26058+void *module_alloc_exec(unsigned long size)
26059+{
26060+ struct vm_struct *area;
26061+
26062+ if (size == 0)
26063+ return NULL;
26064+
26065+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25066+	return area ? area->addr : NULL;
26067+}
26068+EXPORT_SYMBOL(module_alloc_exec);
26069+
26070+void module_memfree_exec(void *module_region)
26071+{
26072+ vunmap(module_region);
26073+}
26074+EXPORT_SYMBOL(module_memfree_exec);
26075+#else
26076+void module_memfree_exec(void *module_region)
26077+{
26078+ module_memfree(module_region);
26079+}
26080+EXPORT_SYMBOL(module_memfree_exec);
26081+
26082+void *module_alloc_exec(unsigned long size)
26083+{
26084+ return __module_alloc(size, PAGE_KERNEL_RX);
26085+}
26086+EXPORT_SYMBOL(module_alloc_exec);
26087+#endif
26088+#endif
26089+
26090 #ifdef CONFIG_X86_32
26091 int apply_relocate(Elf32_Shdr *sechdrs,
26092 const char *strtab,
26093@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26094 unsigned int i;
26095 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26096 Elf32_Sym *sym;
26097- uint32_t *location;
26098+ uint32_t *plocation, location;
26099
26100 DEBUGP("Applying relocate section %u to %u\n",
26101 relsec, sechdrs[relsec].sh_info);
26102 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26103 /* This is where to make the change */
26104- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26105- + rel[i].r_offset;
26106+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26107+ location = (uint32_t)plocation;
26108+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26109+ plocation = ktla_ktva((void *)plocation);
26110 /* This is the symbol it is referring to. Note that all
26111 undefined symbols have been resolved. */
26112 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26113@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26114 switch (ELF32_R_TYPE(rel[i].r_info)) {
26115 case R_386_32:
26116 /* We add the value into the location given */
26117- *location += sym->st_value;
26118+ pax_open_kernel();
26119+ *plocation += sym->st_value;
26120+ pax_close_kernel();
26121 break;
26122 case R_386_PC32:
26123 /* Add the value, subtract its position */
26124- *location += sym->st_value - (uint32_t)location;
26125+ pax_open_kernel();
26126+ *plocation += sym->st_value - location;
26127+ pax_close_kernel();
26128 break;
26129 default:
26130 pr_err("%s: Unknown relocation: %u\n",
26131@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26132 case R_X86_64_NONE:
26133 break;
26134 case R_X86_64_64:
26135+ pax_open_kernel();
26136 *(u64 *)loc = val;
26137+ pax_close_kernel();
26138 break;
26139 case R_X86_64_32:
26140+ pax_open_kernel();
26141 *(u32 *)loc = val;
26142+ pax_close_kernel();
26143 if (val != *(u32 *)loc)
26144 goto overflow;
26145 break;
26146 case R_X86_64_32S:
26147+ pax_open_kernel();
26148 *(s32 *)loc = val;
26149+ pax_close_kernel();
26150 if ((s64)val != *(s32 *)loc)
26151 goto overflow;
26152 break;
26153 case R_X86_64_PC32:
26154 val -= (u64)loc;
26155+ pax_open_kernel();
26156 *(u32 *)loc = val;
26157+ pax_close_kernel();
26158+
26159 #if 0
26160 if ((s64)val != *(s32 *)loc)
26161 goto overflow;
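
The module_alloc() rework enforces W^X for module memory under KERNEXEC: the common allocator now returns PAGE_KERNEL (writable, never executable) mappings, module_alloc_exec() returns executable but read-only ones (PAGE_KERNEL_RX on amd64, a dedicated VA range on i386), and the relocation code writes through the pax_open_kernel() bracket. The policy, restated as userspace mappings:

#include <stdio.h>
#include <sys/mman.h>

#define SZ 4096

int main(void)
{
	/* module_alloc(): the RW image, never executable */
	void *rw = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* module_alloc_exec(): text, executable but not writable */
	void *rx = mmap(NULL, SZ, PROT_READ | PROT_EXEC,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (rw == MAP_FAILED || rx == MAP_FAILED)
		return 1;
	/* no mapping is ever writable and executable at once; patching
	 * text requires an explicit, temporary open/close bracket */
	printf("rw=%p rx=%p\n", rw, rx);
	return 0;
}
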
26162diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26163index 113e707..0a690e1 100644
26164--- a/arch/x86/kernel/msr.c
26165+++ b/arch/x86/kernel/msr.c
26166@@ -39,6 +39,7 @@
26167 #include <linux/notifier.h>
26168 #include <linux/uaccess.h>
26169 #include <linux/gfp.h>
26170+#include <linux/grsecurity.h>
26171
26172 #include <asm/processor.h>
26173 #include <asm/msr.h>
26174@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26175 int err = 0;
26176 ssize_t bytes = 0;
26177
26178+#ifdef CONFIG_GRKERNSEC_KMEM
26179+ gr_handle_msr_write();
26180+ return -EPERM;
26181+#endif
26182+
26183 if (count % 8)
26184 return -EINVAL; /* Invalid chunk size */
26185
26186@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26187 err = -EBADF;
26188 break;
26189 }
26190+#ifdef CONFIG_GRKERNSEC_KMEM
26191+ gr_handle_msr_write();
26192+ return -EPERM;
26193+#endif
26194 if (copy_from_user(&regs, uregs, sizeof regs)) {
26195 err = -EFAULT;
26196 break;
26197@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26198 return notifier_from_errno(err);
26199 }
26200
26201-static struct notifier_block __refdata msr_class_cpu_notifier = {
26202+static struct notifier_block msr_class_cpu_notifier = {
26203 .notifier_call = msr_class_cpu_callback,
26204 };
26205
26206diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26207index c3e985d..110a36a 100644
26208--- a/arch/x86/kernel/nmi.c
26209+++ b/arch/x86/kernel/nmi.c
26210@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26211
26212 static void nmi_max_handler(struct irq_work *w)
26213 {
26214- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26215+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26216 int remainder_ns, decimal_msecs;
26217- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26218+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26219
26220 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26221 decimal_msecs = remainder_ns / 1000;
26222
26223 printk_ratelimited(KERN_INFO
26224 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26225- a->handler, whole_msecs, decimal_msecs);
26226+ n->action->handler, whole_msecs, decimal_msecs);
26227 }
26228
26229 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26230@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26231 delta = sched_clock() - delta;
26232 trace_nmi_handler(a->handler, (int)delta, thishandled);
26233
26234- if (delta < nmi_longest_ns || delta < a->max_duration)
26235+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26236 continue;
26237
26238- a->max_duration = delta;
26239- irq_work_queue(&a->irq_work);
26240+ a->work->max_duration = delta;
26241+ irq_work_queue(&a->work->irq_work);
26242 }
26243
26244 rcu_read_unlock();
26245@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26246 }
26247 NOKPROBE_SYMBOL(nmi_handle);
26248
26249-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26250+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26251 {
26252 struct nmi_desc *desc = nmi_to_desc(type);
26253 unsigned long flags;
26254@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26255 if (!action->handler)
26256 return -EINVAL;
26257
26258- init_irq_work(&action->irq_work, nmi_max_handler);
26259+ action->work->action = action;
26260+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26261
26262 spin_lock_irqsave(&desc->lock, flags);
26263
26264@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26265 * event confuses some handlers (kdump uses this flag)
26266 */
26267 if (action->flags & NMI_FLAG_FIRST)
26268- list_add_rcu(&action->list, &desc->head);
26269+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26270 else
26271- list_add_tail_rcu(&action->list, &desc->head);
26272+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26273
26274 spin_unlock_irqrestore(&desc->lock, flags);
26275 return 0;
26276@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26277 if (!strcmp(n->name, name)) {
26278 WARN(in_nmi(),
26279 "Trying to free NMI (%s) from NMI context!\n", n->name);
26280- list_del_rcu(&n->list);
26281+ pax_list_del_rcu((struct list_head *)&n->list);
26282 break;
26283 }
26284 }
26285@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26286 dotraplinkage notrace void
26287 do_nmi(struct pt_regs *regs, long error_code)
26288 {
26289+
26290+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26291+ if (!user_mode(regs)) {
26292+ unsigned long cs = regs->cs & 0xFFFF;
26293+ unsigned long ip = ktva_ktla(regs->ip);
26294+
26295+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26296+ regs->ip = ip;
26297+ }
26298+#endif
26299+
26300 nmi_nesting_preprocess(regs);
26301
26302 nmi_enter();
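
The nmi.c rework moves the mutable fields (max_duration and the irq_work) out of struct nmiaction into a companion struct nmiwork, which is why __register_nmi_handler() can now take a const pointer: the action itself becomes immutable and registrable from read-only data while its statistics stay writable. The const-config/mutable-stats split in miniature:

#include <stdio.h>

struct work {				/* runtime state, stays writable */
	unsigned long long max_duration;
};

struct action {				/* can now live in .rodata */
	const char *name;
	void (*handler)(void);
	struct work *work;		/* immutable pointer, mutable pointee */
};

static void noop_handler(void) { }
static struct work nmi_work;
static const struct action nmi_action = {
	.name = "demo", .handler = noop_handler, .work = &nmi_work,
};

int main(void)
{
	nmi_action.work->max_duration = 42;	/* fine: only the stats change */
	printf("%s: max %llu\n", nmi_action.name, nmi_action.work->max_duration);
	return 0;
}
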
26303diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26304index 6d9582e..f746287 100644
26305--- a/arch/x86/kernel/nmi_selftest.c
26306+++ b/arch/x86/kernel/nmi_selftest.c
26307@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26308 {
26309 /* trap all the unknown NMIs we may generate */
26310 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26311- __initdata);
26312+ __initconst);
26313 }
26314
26315 static void __init cleanup_nmi_testsuite(void)
26316@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26317 unsigned long timeout;
26318
26319 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26320- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26321+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26322 nmi_fail = FAILURE;
26323 return;
26324 }
26325diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26326index bbb6c73..24a58ef 100644
26327--- a/arch/x86/kernel/paravirt-spinlocks.c
26328+++ b/arch/x86/kernel/paravirt-spinlocks.c
26329@@ -8,7 +8,7 @@
26330
26331 #include <asm/paravirt.h>
26332
26333-struct pv_lock_ops pv_lock_ops = {
26334+struct pv_lock_ops pv_lock_ops __read_only = {
26335 #ifdef CONFIG_SMP
26336 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26337 .unlock_kick = paravirt_nop,
26338diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26339index 548d25f..f8fb99c 100644
26340--- a/arch/x86/kernel/paravirt.c
26341+++ b/arch/x86/kernel/paravirt.c
26342@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26343 {
26344 return x;
26345 }
26346+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26347+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26348+#endif
26349
26350 void __init default_banner(void)
26351 {
26352@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26353
26354 if (opfunc == NULL)
26355 /* If there's no function, patch it with a ud2a (BUG) */
26356- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26357- else if (opfunc == _paravirt_nop)
26358+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26359+ else if (opfunc == (void *)_paravirt_nop)
26360 /* If the operation is a nop, then nop the callsite */
26361 ret = paravirt_patch_nop();
26362
26363 /* identity functions just return their single argument */
26364- else if (opfunc == _paravirt_ident_32)
26365+ else if (opfunc == (void *)_paravirt_ident_32)
26366 ret = paravirt_patch_ident_32(insnbuf, len);
26367- else if (opfunc == _paravirt_ident_64)
26368+ else if (opfunc == (void *)_paravirt_ident_64)
26369 ret = paravirt_patch_ident_64(insnbuf, len);
26370+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26371+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26372+ ret = paravirt_patch_ident_64(insnbuf, len);
26373+#endif
26374
26375 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26376 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26377@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26378 if (insn_len > len || start == NULL)
26379 insn_len = len;
26380 else
26381- memcpy(insnbuf, start, insn_len);
26382+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26383
26384 return insn_len;
26385 }
26386@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26387 return this_cpu_read(paravirt_lazy_mode);
26388 }
26389
26390-struct pv_info pv_info = {
26391+struct pv_info pv_info __read_only = {
26392 .name = "bare hardware",
26393 .paravirt_enabled = 0,
26394 .kernel_rpl = 0,
26395@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26396 #endif
26397 };
26398
26399-struct pv_init_ops pv_init_ops = {
26400+struct pv_init_ops pv_init_ops __read_only = {
26401 .patch = native_patch,
26402 };
26403
26404-struct pv_time_ops pv_time_ops = {
26405+struct pv_time_ops pv_time_ops __read_only = {
26406 .sched_clock = native_sched_clock,
26407 .steal_clock = native_steal_clock,
26408 };
26409
26410-__visible struct pv_irq_ops pv_irq_ops = {
26411+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26412 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26413 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26414 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26415@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26416 #endif
26417 };
26418
26419-__visible struct pv_cpu_ops pv_cpu_ops = {
26420+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26421 .cpuid = native_cpuid,
26422 .get_debugreg = native_get_debugreg,
26423 .set_debugreg = native_set_debugreg,
26424@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26425 NOKPROBE_SYMBOL(native_set_debugreg);
26426 NOKPROBE_SYMBOL(native_load_idt);
26427
26428-struct pv_apic_ops pv_apic_ops = {
25429+struct pv_apic_ops pv_apic_ops __read_only = {
26430 #ifdef CONFIG_X86_LOCAL_APIC
26431 .startup_ipi_hook = paravirt_nop,
26432 #endif
26433 };
26434
26435-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26436+#ifdef CONFIG_X86_32
26437+#ifdef CONFIG_X86_PAE
26438+/* 64-bit pagetable entries */
26439+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26440+#else
26441 /* 32-bit pagetable entries */
26442 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26443+#endif
26444 #else
26445 /* 64-bit pagetable entries */
26446 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26447 #endif
26448
26449-struct pv_mmu_ops pv_mmu_ops = {
26450+struct pv_mmu_ops pv_mmu_ops __read_only = {
26451
26452 .read_cr2 = native_read_cr2,
26453 .write_cr2 = native_write_cr2,
26454@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26455 .make_pud = PTE_IDENT,
26456
26457 .set_pgd = native_set_pgd,
26458+ .set_pgd_batched = native_set_pgd_batched,
26459 #endif
26460 #endif /* PAGETABLE_LEVELS >= 3 */
26461
26462@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26463 },
26464
26465 .set_fixmap = native_set_fixmap,
26466+
26467+#ifdef CONFIG_PAX_KERNEXEC
26468+ .pax_open_kernel = native_pax_open_kernel,
26469+ .pax_close_kernel = native_pax_close_kernel,
26470+#endif
26471+
26472 };
26473
26474 EXPORT_SYMBOL_GPL(pv_time_ops);
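
Every pv_*_ops table above gains __read_only, which under PaX places the object in a section that is write-protected once boot-time patching is done; these are hot-path function-pointer tables, a classic kernel-exploitation target, so making them immutable removes the write primitive. A sketch of the marker itself (the section name is an assumption, and a plain userspace link will not actually write-protect it; that part comes from how the kernel maps the section):

#include <stdio.h>

/* PaX-style marker: park the object in a section the kernel can
 * write-protect after boot-time patching is finished */
#define __read_only __attribute__((section(".data..read_only")))

struct pv_irq_ops_demo {
	void (*irq_disable)(void);
	void (*irq_enable)(void);
};

static void native_irq_disable(void) { }
static void native_irq_enable(void) { }

static struct pv_irq_ops_demo demo_ops __read_only = {
	.irq_disable = native_irq_disable,
	.irq_enable  = native_irq_enable,
};

int main(void)
{
	/* any later legitimate update would need the
	 * pax_open_kernel()/pax_close_kernel() bracket */
	demo_ops.irq_disable();
	printf("ops at %p\n", (void *)&demo_ops);
	return 0;
}
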
26475diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26476index a1da673..b6f5831 100644
26477--- a/arch/x86/kernel/paravirt_patch_64.c
26478+++ b/arch/x86/kernel/paravirt_patch_64.c
26479@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26480 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26481 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26482 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26483+
26484+#ifndef CONFIG_PAX_MEMORY_UDEREF
26485 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26486+#endif
26487+
26488 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26489 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26490
26491@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26492 PATCH_SITE(pv_mmu_ops, read_cr3);
26493 PATCH_SITE(pv_mmu_ops, write_cr3);
26494 PATCH_SITE(pv_cpu_ops, clts);
26495+
26496+#ifndef CONFIG_PAX_MEMORY_UDEREF
26497 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26498+#endif
26499+
26500 PATCH_SITE(pv_cpu_ops, wbinvd);
26501
26502 patch_site:
26503diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26504index 0497f71..7186c0d 100644
26505--- a/arch/x86/kernel/pci-calgary_64.c
26506+++ b/arch/x86/kernel/pci-calgary_64.c
26507@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26508 tce_space = be64_to_cpu(readq(target));
26509 tce_space = tce_space & TAR_SW_BITS;
26510
26511- tce_space = tce_space & (~specified_table_size);
26512+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26513 info->tce_space = (u64 *)__va(tce_space);
26514 }
26515 }
26516diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26517index 35ccf75..7a15747 100644
26518--- a/arch/x86/kernel/pci-iommu_table.c
26519+++ b/arch/x86/kernel/pci-iommu_table.c
26520@@ -2,7 +2,7 @@
26521 #include <asm/iommu_table.h>
26522 #include <linux/string.h>
26523 #include <linux/kallsyms.h>
26524-
26525+#include <linux/sched.h>
26526
26527 #define DEBUG 1
26528
26529diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26530index 77dd0ad..9ec4723 100644
26531--- a/arch/x86/kernel/pci-swiotlb.c
26532+++ b/arch/x86/kernel/pci-swiotlb.c
26533@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26534 struct dma_attrs *attrs)
26535 {
26536 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26537- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26538+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26539 else
26540 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26541 }
26542diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26543index e127dda..94e384d 100644
26544--- a/arch/x86/kernel/process.c
26545+++ b/arch/x86/kernel/process.c
26546@@ -36,7 +36,8 @@
26547 * section. Since TSS's are completely CPU-local, we want them
26548 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26549 */
26550-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26551+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26552+EXPORT_SYMBOL(init_tss);
26553
26554 #ifdef CONFIG_X86_64
26555 static DEFINE_PER_CPU(unsigned char, is_idle);
26556@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26557 task_xstate_cachep =
26558 kmem_cache_create("task_xstate", xstate_size,
26559 __alignof__(union thread_xstate),
26560- SLAB_PANIC | SLAB_NOTRACK, NULL);
26561+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26562 setup_xstate_comp();
26563 }
26564
26565@@ -108,7 +109,7 @@ void exit_thread(void)
26566 unsigned long *bp = t->io_bitmap_ptr;
26567
26568 if (bp) {
26569- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26570+ struct tss_struct *tss = init_tss + get_cpu();
26571
26572 t->io_bitmap_ptr = NULL;
26573 clear_thread_flag(TIF_IO_BITMAP);
26574@@ -128,6 +129,9 @@ void flush_thread(void)
26575 {
26576 struct task_struct *tsk = current;
26577
26578+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26579+ loadsegment(gs, 0);
26580+#endif
26581 flush_ptrace_hw_breakpoint(tsk);
26582 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26583 drop_init_fpu(tsk);
26584@@ -274,7 +278,7 @@ static void __exit_idle(void)
26585 void exit_idle(void)
26586 {
26587 /* idle loop has pid 0 */
26588- if (current->pid)
26589+ if (task_pid_nr(current))
26590 return;
26591 __exit_idle();
26592 }
26593@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26594 return ret;
26595 }
26596 #endif
26597-void stop_this_cpu(void *dummy)
26598+__noreturn void stop_this_cpu(void *dummy)
26599 {
26600 local_irq_disable();
26601 /*
26602@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26603 }
26604 early_param("idle", idle_setup);
26605
26606-unsigned long arch_align_stack(unsigned long sp)
26607+#ifdef CONFIG_PAX_RANDKSTACK
26608+void pax_randomize_kstack(struct pt_regs *regs)
26609 {
26610- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26611- sp -= get_random_int() % 8192;
26612- return sp & ~0xf;
26613-}
26614+ struct thread_struct *thread = &current->thread;
26615+ unsigned long time;
26616
26617-unsigned long arch_randomize_brk(struct mm_struct *mm)
26618-{
26619- unsigned long range_end = mm->brk + 0x02000000;
26620- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26621-}
26622+ if (!randomize_va_space)
26623+ return;
26624+
26625+ if (v8086_mode(regs))
26626+ return;
26627
26628+ rdtscl(time);
26629+
26630+ /* P4 seems to return a 0 LSB, ignore it */
26631+#ifdef CONFIG_MPENTIUM4
26632+ time &= 0x3EUL;
26633+ time <<= 2;
26634+#elif defined(CONFIG_X86_64)
26635+ time &= 0xFUL;
26636+ time <<= 4;
26637+#else
26638+ time &= 0x1FUL;
26639+ time <<= 3;
26640+#endif
26641+
26642+ thread->sp0 ^= time;
26643+ load_sp0(init_tss + smp_processor_id(), thread);
26644+
26645+#ifdef CONFIG_X86_64
26646+ this_cpu_write(kernel_stack, thread->sp0);
26647+#endif
26648+}
26649+#endif
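
pax_randomize_kstack() supersedes the removed arch_align_stack()/arch_randomize_brk(): on each syscall return it XORs a few TSC-derived bits into thread->sp0 and reloads the TSS, re-randomizing the kernel stack base per syscall rather than once at process start. The masks keep the perturbation small and aligned; on amd64, time &= 0xF; time <<= 4 gives 16 offsets in 16-byte steps (0..240), the i386 default gives 32 offsets in 8-byte steps, and the Pentium 4 variant masks with 0x3E first because that CPU's TSC reportedly returns a zero LSB. The arithmetic, worked through:

#include <stdio.h>

int main(void)
{
	unsigned long tsc;

	for (tsc = 0; tsc < 64; tsc += 17) {
		unsigned long off64 = (tsc & 0xFUL) << 4;	/* amd64: 0..240 */
		unsigned long off32 = (tsc & 0x1FUL) << 3;	/* i386:  0..248 */
		unsigned long offp4 = (tsc & 0x3EUL) << 2;	/* P4: skip dead LSB */

		printf("tsc=%2lu -> sp0 ^= %3lu / %3lu / %3lu\n",
		       tsc, off64, off32, offp4);
	}
	return 0;
}
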
26650diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26651index 8f3ebfe..cbc731b 100644
26652--- a/arch/x86/kernel/process_32.c
26653+++ b/arch/x86/kernel/process_32.c
26654@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26655 unsigned long thread_saved_pc(struct task_struct *tsk)
26656 {
26657 return ((unsigned long *)tsk->thread.sp)[3];
26658+//XXX return tsk->thread.eip;
26659 }
26660
26661 void __show_regs(struct pt_regs *regs, int all)
26662@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26663 unsigned long sp;
26664 unsigned short ss, gs;
26665
26666- if (user_mode_vm(regs)) {
26667+ if (user_mode(regs)) {
26668 sp = regs->sp;
26669 ss = regs->ss & 0xffff;
26670- gs = get_user_gs(regs);
26671 } else {
26672 sp = kernel_stack_pointer(regs);
26673 savesegment(ss, ss);
26674- savesegment(gs, gs);
26675 }
26676+ gs = get_user_gs(regs);
26677
26678 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26679 (u16)regs->cs, regs->ip, regs->flags,
26680- smp_processor_id());
26681+ raw_smp_processor_id());
26682 print_symbol("EIP is at %s\n", regs->ip);
26683
26684 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26685@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26686 int copy_thread(unsigned long clone_flags, unsigned long sp,
26687 unsigned long arg, struct task_struct *p)
26688 {
26689- struct pt_regs *childregs = task_pt_regs(p);
26690+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26691 struct task_struct *tsk;
26692 int err;
26693
26694 p->thread.sp = (unsigned long) childregs;
26695 p->thread.sp0 = (unsigned long) (childregs+1);
26696+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26697 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26698
26699 if (unlikely(p->flags & PF_KTHREAD)) {
26700 /* kernel thread */
26701 memset(childregs, 0, sizeof(struct pt_regs));
26702 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26703- task_user_gs(p) = __KERNEL_STACK_CANARY;
26704- childregs->ds = __USER_DS;
26705- childregs->es = __USER_DS;
26706+ savesegment(gs, childregs->gs);
26707+ childregs->ds = __KERNEL_DS;
26708+ childregs->es = __KERNEL_DS;
26709 childregs->fs = __KERNEL_PERCPU;
26710 childregs->bx = sp; /* function */
26711 childregs->bp = arg;
26712@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26713 struct thread_struct *prev = &prev_p->thread,
26714 *next = &next_p->thread;
26715 int cpu = smp_processor_id();
26716- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26717+ struct tss_struct *tss = init_tss + cpu;
26718 fpu_switch_t fpu;
26719
26720 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26721@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26722 */
26723 lazy_save_gs(prev->gs);
26724
26725+#ifdef CONFIG_PAX_MEMORY_UDEREF
26726+ __set_fs(task_thread_info(next_p)->addr_limit);
26727+#endif
26728+
26729 /*
26730 * Load the per-thread Thread-Local Storage descriptor.
26731 */
26732@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26733 */
26734 arch_end_context_switch(next_p);
26735
26736- this_cpu_write(kernel_stack,
26737- (unsigned long)task_stack_page(next_p) +
26738- THREAD_SIZE - KERNEL_STACK_OFFSET);
26739+ this_cpu_write(current_task, next_p);
26740+ this_cpu_write(current_tinfo, &next_p->tinfo);
26741+ this_cpu_write(kernel_stack, next->sp0);
26742
26743 /*
26744 * Restore %gs if needed (which is common)
26745@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26746
26747 switch_fpu_finish(next_p, fpu);
26748
26749- this_cpu_write(current_task, next_p);
26750-
26751 return prev_p;
26752 }
26753
26754@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26755 } while (count++ < 16);
26756 return 0;
26757 }
26758-
26759diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26760index 5a2c029..ec8611d 100644
26761--- a/arch/x86/kernel/process_64.c
26762+++ b/arch/x86/kernel/process_64.c
26763@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26764 struct pt_regs *childregs;
26765 struct task_struct *me = current;
26766
26767- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26768+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26769 childregs = task_pt_regs(p);
26770 p->thread.sp = (unsigned long) childregs;
26771 p->thread.usersp = me->thread.usersp;
26772+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26773 set_tsk_thread_flag(p, TIF_FORK);
26774 p->thread.io_bitmap_ptr = NULL;
26775
26776@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26777 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26778 savesegment(es, p->thread.es);
26779 savesegment(ds, p->thread.ds);
26780+ savesegment(ss, p->thread.ss);
26781+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26782 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26783
26784 if (unlikely(p->flags & PF_KTHREAD)) {
26785@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26786 struct thread_struct *prev = &prev_p->thread;
26787 struct thread_struct *next = &next_p->thread;
26788 int cpu = smp_processor_id();
26789- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26790+ struct tss_struct *tss = init_tss + cpu;
26791 unsigned fsindex, gsindex;
26792 fpu_switch_t fpu;
26793
26794@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26795 if (unlikely(next->ds | prev->ds))
26796 loadsegment(ds, next->ds);
26797
26798+ savesegment(ss, prev->ss);
26799+ if (unlikely(next->ss != prev->ss))
26800+ loadsegment(ss, next->ss);
26801+
26802 /*
26803 * Switch FS and GS.
26804 *
26805@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26806 prev->usersp = this_cpu_read(old_rsp);
26807 this_cpu_write(old_rsp, next->usersp);
26808 this_cpu_write(current_task, next_p);
26809+ this_cpu_write(current_tinfo, &next_p->tinfo);
26810
26811 /*
26812 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26813@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26814 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26815 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26816
26817- this_cpu_write(kernel_stack,
26818- (unsigned long)task_stack_page(next_p) +
26819- THREAD_SIZE - KERNEL_STACK_OFFSET);
26820+ this_cpu_write(kernel_stack, next->sp0);
26821
26822 /*
26823 * Now maybe reload the debug registers and handle I/O bitmaps
26824@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26825 if (!p || p == current || p->state == TASK_RUNNING)
26826 return 0;
26827 stack = (unsigned long)task_stack_page(p);
26828- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26829+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26830 return 0;
26831 fp = *(u64 *)(p->thread.sp);
26832 do {
26833- if (fp < (unsigned long)stack ||
26834- fp >= (unsigned long)stack+THREAD_SIZE)
26835+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26836 return 0;
26837 ip = *(u64 *)(fp+8);
26838 if (!in_sched_functions(ip))
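The 64-bit get_wchan() hunks tighten the frame-pointer walk: thread.sp and every saved frame pointer must now stay at or below stack + THREAD_SIZE - 16 - sizeof(u64), matching the 16 bytes the patch reserves at the stack top. A standalone sketch of the same bounded walk over a fabricated stack; the two-frame setup and the constants are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define THREAD_SIZE 8192UL                     /* stand-in value */
typedef uint64_t u64;

/* Accept a frame pointer only while it stays inside the usable stack. */
static int fp_in_bounds(unsigned long fp, unsigned long stack)
{
    return fp >= stack && fp <= stack + THREAD_SIZE - 16 - sizeof(u64);
}

int main(void)
{
    static unsigned long stack_mem[THREAD_SIZE / sizeof(unsigned long)];
    unsigned long stack = (unsigned long)stack_mem;

    /* fabricate two frames: frame B's saved fp points back to frame A */
    unsigned long frame_a = stack + 4096;
    unsigned long frame_b = stack + 2048;
    memcpy((void *)frame_b, &frame_a, sizeof(frame_a));

    unsigned long fp = frame_b;
    int count = 0;
    while (fp_in_bounds(fp, stack) && count++ < 16) {
        printf("frame %d at %#lx\n", count, fp);
        fp = *(unsigned long *)fp;  /* follow the saved frame pointer */
    }
    /* frame A's saved fp is 0, which fails the bounds check and stops us */
    return 0;
}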
26839diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26840index e510618..5165ac0 100644
26841--- a/arch/x86/kernel/ptrace.c
26842+++ b/arch/x86/kernel/ptrace.c
26843@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26844 unsigned long sp = (unsigned long)&regs->sp;
26845 u32 *prev_esp;
26846
26847- if (context == (sp & ~(THREAD_SIZE - 1)))
26848+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26849 return sp;
26850
26851- prev_esp = (u32 *)(context);
26852+ prev_esp = *(u32 **)(context);
26853 if (prev_esp)
26854 return (unsigned long)prev_esp;
26855
26856@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26857 if (child->thread.gs != value)
26858 return do_arch_prctl(child, ARCH_SET_GS, value);
26859 return 0;
26860+
26861+ case offsetof(struct user_regs_struct,ip):
26862+ /*
26863+ * Protect against any attempt to set ip to an
26864+ * impossible address. There are dragons lurking if the
26865+ * address is noncanonical. (This explicitly allows
26866+ * setting ip to TASK_SIZE_MAX, because user code can do
26867+ * that all by itself by running off the end of its
26868+ * address space.)
26869+ */
26870+ if (value > TASK_SIZE_MAX)
26871+ return -EIO;
26872+ break;
26873+
26874 #endif
26875 }
26876
26877@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26878 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26879 {
26880 int i;
26881- int dr7 = 0;
26882+ unsigned long dr7 = 0;
26883 struct arch_hw_breakpoint *info;
26884
26885 for (i = 0; i < HBP_NUM; i++) {
26886@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26887 unsigned long addr, unsigned long data)
26888 {
26889 int ret;
26890- unsigned long __user *datap = (unsigned long __user *)data;
26891+ unsigned long __user *datap = (__force unsigned long __user *)data;
26892
26893 switch (request) {
26894 /* read the word at location addr in the USER area. */
26895@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26896 if ((int) addr < 0)
26897 return -EIO;
26898 ret = do_get_thread_area(child, addr,
26899- (struct user_desc __user *)data);
26900+ (__force struct user_desc __user *) data);
26901 break;
26902
26903 case PTRACE_SET_THREAD_AREA:
26904 if ((int) addr < 0)
26905 return -EIO;
26906 ret = do_set_thread_area(child, addr,
26907- (struct user_desc __user *)data, 0);
26908+ (__force struct user_desc __user *) data, 0);
26909 break;
26910 #endif
26911
26912@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26913
26914 #ifdef CONFIG_X86_64
26915
26916-static struct user_regset x86_64_regsets[] __read_mostly = {
26917+static user_regset_no_const x86_64_regsets[] __read_only = {
26918 [REGSET_GENERAL] = {
26919 .core_note_type = NT_PRSTATUS,
26920 .n = sizeof(struct user_regs_struct) / sizeof(long),
26921@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26922 #endif /* CONFIG_X86_64 */
26923
26924 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26925-static struct user_regset x86_32_regsets[] __read_mostly = {
26926+static user_regset_no_const x86_32_regsets[] __read_only = {
26927 [REGSET_GENERAL] = {
26928 .core_note_type = NT_PRSTATUS,
26929 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26930@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26931 */
26932 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26933
26934-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26935+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26936 {
26937 #ifdef CONFIG_X86_64
26938 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26939@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26940 memset(info, 0, sizeof(*info));
26941 info->si_signo = SIGTRAP;
26942 info->si_code = si_code;
26943- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26944+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26945 }
26946
26947 void user_single_step_siginfo(struct task_struct *tsk,
26948@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26949 }
26950 }
26951
26952+#ifdef CONFIG_GRKERNSEC_SETXID
26953+extern void gr_delayed_cred_worker(void);
26954+#endif
26955+
26956 /*
26957 * We can return 0 to resume the syscall or anything else to go to phase
26958 * 2. If we resume the syscall, we need to put something appropriate in
26959@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26960
26961 BUG_ON(regs != task_pt_regs(current));
26962
26963+#ifdef CONFIG_GRKERNSEC_SETXID
26964+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26965+ gr_delayed_cred_worker();
26966+#endif
26967+
26968 /*
26969 * If we stepped into a sysenter/syscall insn, it trapped in
26970 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26971@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26972 */
26973 user_exit();
26974
26975+#ifdef CONFIG_GRKERNSEC_SETXID
26976+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26977+ gr_delayed_cred_worker();
26978+#endif
26979+
26980 audit_syscall_exit(regs);
26981
26982 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
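The putreg() hunk above refuses any ptrace write that would park a traced task's ip beyond TASK_SIZE_MAX, since returning to a noncanonical address on x86-64 faults in awkward places; TASK_SIZE_MAX itself stays legal because user code can reach it on its own. A minimal sketch of that validation; the TASK_SIZE_MAX constant below is an illustrative value, not pulled from kernel headers:

#include <errno.h>
#include <stdio.h>

#define TASK_SIZE_MAX ((1UL << 47) - 4096)  /* illustrative x86-64 limit */

/* Mirror the check: the boundary itself passes, anything above fails. */
static int validate_ip(unsigned long value)
{
    if (value > TASK_SIZE_MAX)
        return -EIO;
    return 0;
}

int main(void)
{
    printf("%d\n", validate_ip(TASK_SIZE_MAX));            /* 0 */
    printf("%d\n", validate_ip(TASK_SIZE_MAX + 1));        /* -EIO */
    printf("%d\n", validate_ip(0xffff800000000000UL));     /* -EIO */
    return 0;
}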
26983diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26984index 2f355d2..e75ed0a 100644
26985--- a/arch/x86/kernel/pvclock.c
26986+++ b/arch/x86/kernel/pvclock.c
26987@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26988 reset_hung_task_detector();
26989 }
26990
26991-static atomic64_t last_value = ATOMIC64_INIT(0);
26992+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26993
26994 void pvclock_resume(void)
26995 {
26996- atomic64_set(&last_value, 0);
26997+ atomic64_set_unchecked(&last_value, 0);
26998 }
26999
27000 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27001@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27002 * updating at the same time, and one of them could be slightly behind,
27003 * making the assumption that last_value always goes forward fail to hold.
27004 */
27005- last = atomic64_read(&last_value);
27006+ last = atomic64_read_unchecked(&last_value);
27007 do {
27008 if (ret < last)
27009 return last;
27010- last = atomic64_cmpxchg(&last_value, last, ret);
27011+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27012 } while (unlikely(last != ret));
27013
27014 return ret;
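The pvclock hunk only swaps the atomic64 accessors for their _unchecked variants; the algorithm itself is untouched: a compare-and-swap loop that publishes a timestamp only if it moves last_value forward, so no reader ever observes the clock running backwards. The same pattern in a user-space C11 sketch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Return max(ret, last_value) while ratcheting last_value forward. */
static uint64_t monotonic_read(uint64_t ret)
{
    uint64_t last = atomic_load(&last_value);

    for (;;) {
        if (ret < last)
            return last;  /* someone already published a later time */
        /* try to publish ret; on failure, last is reloaded for us */
        if (atomic_compare_exchange_weak(&last_value, &last, ret))
            return ret;
    }
}

int main(void)
{
    printf("%llu\n", (unsigned long long)monotonic_read(100));  /* 100 */
    printf("%llu\n", (unsigned long long)monotonic_read(90));   /* 100 */
    printf("%llu\n", (unsigned long long)monotonic_read(150));  /* 150 */
    return 0;
}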
27015diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27016index bae6c60..b438619 100644
27017--- a/arch/x86/kernel/reboot.c
27018+++ b/arch/x86/kernel/reboot.c
27019@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27020
27021 void __noreturn machine_real_restart(unsigned int type)
27022 {
27023+
27024+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27025+ struct desc_struct *gdt;
27026+#endif
27027+
27028 local_irq_disable();
27029
27030 /*
27031@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
27032
27033 /* Jump to the identity-mapped low memory code */
27034 #ifdef CONFIG_X86_32
27035- asm volatile("jmpl *%0" : :
27036+
27037+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27038+ gdt = get_cpu_gdt_table(smp_processor_id());
27039+ pax_open_kernel();
27040+#ifdef CONFIG_PAX_MEMORY_UDEREF
27041+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27042+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27043+ loadsegment(ds, __KERNEL_DS);
27044+ loadsegment(es, __KERNEL_DS);
27045+ loadsegment(ss, __KERNEL_DS);
27046+#endif
27047+#ifdef CONFIG_PAX_KERNEXEC
27048+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27049+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27050+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27051+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27052+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27053+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27054+#endif
27055+ pax_close_kernel();
27056+#endif
27057+
27058+ asm volatile("ljmpl *%0" : :
27059 "rm" (real_mode_header->machine_real_restart_asm),
27060 "a" (type));
27061 #else
27062@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27063 * This means that this function can never return, it can misbehave
27064 * by not rebooting properly and hanging.
27065 */
27066-static void native_machine_emergency_restart(void)
27067+static void __noreturn native_machine_emergency_restart(void)
27068 {
27069 int i;
27070 int attempt = 0;
27071@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
27072 #endif
27073 }
27074
27075-static void __machine_emergency_restart(int emergency)
27076+static void __noreturn __machine_emergency_restart(int emergency)
27077 {
27078 reboot_emergency = emergency;
27079 machine_ops.emergency_restart();
27080 }
27081
27082-static void native_machine_restart(char *__unused)
27083+static void __noreturn native_machine_restart(char *__unused)
27084 {
27085 pr_notice("machine restart\n");
27086
27087@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
27088 __machine_emergency_restart(0);
27089 }
27090
27091-static void native_machine_halt(void)
27092+static void __noreturn native_machine_halt(void)
27093 {
27094 /* Stop other cpus and apics */
27095 machine_shutdown();
27096@@ -646,7 +673,7 @@ static void native_machine_halt(void)
27097 stop_this_cpu(NULL);
27098 }
27099
27100-static void native_machine_power_off(void)
27101+static void __noreturn native_machine_power_off(void)
27102 {
27103 if (pm_power_off) {
27104 if (!reboot_force)
27105@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
27106 }
27107 /* A fallback in case there is no PM info available */
27108 tboot_shutdown(TB_SHUTDOWN_HALT);
27109+ unreachable();
27110 }
27111
27112-struct machine_ops machine_ops = {
27113+struct machine_ops machine_ops __read_only = {
27114 .power_off = native_machine_power_off,
27115 .shutdown = native_machine_shutdown,
27116 .emergency_restart = native_machine_emergency_restart,
27117diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27118index c8e41e9..64049ef 100644
27119--- a/arch/x86/kernel/reboot_fixups_32.c
27120+++ b/arch/x86/kernel/reboot_fixups_32.c
27121@@ -57,7 +57,7 @@ struct device_fixup {
27122 unsigned int vendor;
27123 unsigned int device;
27124 void (*reboot_fixup)(struct pci_dev *);
27125-};
27126+} __do_const;
27127
27128 /*
27129 * PCI ids solely used for fixups_table go here
27130diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27131index 3fd2c69..a444264 100644
27132--- a/arch/x86/kernel/relocate_kernel_64.S
27133+++ b/arch/x86/kernel/relocate_kernel_64.S
27134@@ -96,8 +96,7 @@ relocate_kernel:
27135
27136 /* jump to identity mapped page */
27137 addq $(identity_mapped - relocate_kernel), %r8
27138- pushq %r8
27139- ret
27140+ jmp *%r8
27141
27142 identity_mapped:
27143 /* set return address to 0 if not preserving context */
27144diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27145index ab4734e..c4ca0eb 100644
27146--- a/arch/x86/kernel/setup.c
27147+++ b/arch/x86/kernel/setup.c
27148@@ -110,6 +110,7 @@
27149 #include <asm/mce.h>
27150 #include <asm/alternative.h>
27151 #include <asm/prom.h>
27152+#include <asm/boot.h>
27153
27154 /*
27155 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27156@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27157 #endif
27158
27159
27160-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27161-__visible unsigned long mmu_cr4_features;
27162+#ifdef CONFIG_X86_64
27163+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27164+#elif defined(CONFIG_X86_PAE)
27165+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27166 #else
27167-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27168+__visible unsigned long mmu_cr4_features __read_only;
27169 #endif
27170
27171+void set_in_cr4(unsigned long mask)
27172+{
27173+ unsigned long cr4 = read_cr4();
27174+
27175+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27176+ return;
27177+
27178+ pax_open_kernel();
27179+ mmu_cr4_features |= mask;
27180+ pax_close_kernel();
27181+
27182+ if (trampoline_cr4_features)
27183+ *trampoline_cr4_features = mmu_cr4_features;
27184+ cr4 |= mask;
27185+ write_cr4(cr4);
27186+}
27187+EXPORT_SYMBOL(set_in_cr4);
27188+
27189+void clear_in_cr4(unsigned long mask)
27190+{
27191+ unsigned long cr4 = read_cr4();
27192+
27193+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27194+ return;
27195+
27196+ pax_open_kernel();
27197+ mmu_cr4_features &= ~mask;
27198+ pax_close_kernel();
27199+
27200+ if (trampoline_cr4_features)
27201+ *trampoline_cr4_features = mmu_cr4_features;
27202+ cr4 &= ~mask;
27203+ write_cr4(cr4);
27204+}
27205+EXPORT_SYMBOL(clear_in_cr4);
27206+
27207 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27208 int bootloader_type, bootloader_version;
27209
27210@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27211 * area (640->1Mb) as ram even though it is not.
27212 * take them out.
27213 */
27214- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27215+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27216
27217 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27218 }
27219@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27220 /* called before trim_bios_range() to spare extra sanitize */
27221 static void __init e820_add_kernel_range(void)
27222 {
27223- u64 start = __pa_symbol(_text);
27224+ u64 start = __pa_symbol(ktla_ktva(_text));
27225 u64 size = __pa_symbol(_end) - start;
27226
27227 /*
27228@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27229
27230 void __init setup_arch(char **cmdline_p)
27231 {
27232+#ifdef CONFIG_X86_32
27233+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27234+#else
27235 memblock_reserve(__pa_symbol(_text),
27236 (unsigned long)__bss_stop - (unsigned long)_text);
27237+#endif
27238
27239 early_reserve_initrd();
27240
27241@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27242
27243 if (!boot_params.hdr.root_flags)
27244 root_mountflags &= ~MS_RDONLY;
27245- init_mm.start_code = (unsigned long) _text;
27246- init_mm.end_code = (unsigned long) _etext;
27247+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27248+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27249 init_mm.end_data = (unsigned long) _edata;
27250 init_mm.brk = _brk_end;
27251
27252 mpx_mm_init(&init_mm);
27253
27254- code_resource.start = __pa_symbol(_text);
27255- code_resource.end = __pa_symbol(_etext)-1;
27256- data_resource.start = __pa_symbol(_etext);
27257+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27258+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27259+ data_resource.start = __pa_symbol(_sdata);
27260 data_resource.end = __pa_symbol(_edata)-1;
27261 bss_resource.start = __pa_symbol(__bss_start);
27262 bss_resource.end = __pa_symbol(__bss_stop)-1;
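The set_in_cr4()/clear_in_cr4() helpers added above keep a software shadow (mmu_cr4_features, plus the trampoline copy) in sync with the hardware register, unprotecting the otherwise read-only variable only for the duration of the update. A standalone sketch of that shadow-and-sync pattern; the register is mocked here and the pax_open_kernel()/pax_close_kernel() bracketing is reduced to a comment:

#include <stdio.h>

static unsigned long hw_cr4;      /* mock of the hardware register */
static unsigned long cr4_shadow;  /* mock of mmu_cr4_features */

static unsigned long read_cr4(void)    { return hw_cr4; }
static void write_cr4(unsigned long v) { hw_cr4 = v; }

static void set_in_cr4(unsigned long mask)
{
    unsigned long cr4 = read_cr4();

    /* nothing to do if the bits are set and the shadow already agrees */
    if ((cr4 & mask) == mask && cr4 == cr4_shadow)
        return;

    cr4_shadow |= mask;  /* the kernel brackets this with pax_open/close_kernel() */
    cr4 |= mask;
    write_cr4(cr4);
}

static void clear_in_cr4(unsigned long mask)
{
    unsigned long cr4 = read_cr4();

    if (!(cr4 & mask) && cr4 == cr4_shadow)
        return;

    cr4_shadow &= ~mask;
    cr4 &= ~mask;
    write_cr4(cr4);
}

int main(void)
{
    set_in_cr4(1UL << 7);    /* e.g. a PGE-like feature bit */
    printf("cr4=%#lx shadow=%#lx\n", hw_cr4, cr4_shadow);
    clear_in_cr4(1UL << 7);
    printf("cr4=%#lx shadow=%#lx\n", hw_cr4, cr4_shadow);
    return 0;
}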
27263diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27264index e4fcb87..9c06c55 100644
27265--- a/arch/x86/kernel/setup_percpu.c
27266+++ b/arch/x86/kernel/setup_percpu.c
27267@@ -21,19 +21,17 @@
27268 #include <asm/cpu.h>
27269 #include <asm/stackprotector.h>
27270
27271-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27272+#ifdef CONFIG_SMP
27273+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27274 EXPORT_PER_CPU_SYMBOL(cpu_number);
27275+#endif
27276
27277-#ifdef CONFIG_X86_64
27278 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27279-#else
27280-#define BOOT_PERCPU_OFFSET 0
27281-#endif
27282
27283 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27284 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27285
27286-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27287+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27288 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27289 };
27290 EXPORT_SYMBOL(__per_cpu_offset);
27291@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27292 {
27293 #ifdef CONFIG_NEED_MULTIPLE_NODES
27294 pg_data_t *last = NULL;
27295- unsigned int cpu;
27296+ int cpu;
27297
27298 for_each_possible_cpu(cpu) {
27299 int node = early_cpu_to_node(cpu);
27300@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27301 {
27302 #ifdef CONFIG_X86_32
27303 struct desc_struct gdt;
27304+ unsigned long base = per_cpu_offset(cpu);
27305
27306- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27307- 0x2 | DESCTYPE_S, 0x8);
27308- gdt.s = 1;
27309+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27310+ 0x83 | DESCTYPE_S, 0xC);
27311 write_gdt_entry(get_cpu_gdt_table(cpu),
27312 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27313 #endif
27314@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27315 /* alrighty, percpu areas up and running */
27316 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27317 for_each_possible_cpu(cpu) {
27318+#ifdef CONFIG_CC_STACKPROTECTOR
27319+#ifdef CONFIG_X86_32
27320+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27321+#endif
27322+#endif
27323 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27324 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27325 per_cpu(cpu_number, cpu) = cpu;
27326@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27327 */
27328 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27329 #endif
27330+#ifdef CONFIG_CC_STACKPROTECTOR
27331+#ifdef CONFIG_X86_32
27332+ if (!cpu)
27333+ per_cpu(stack_canary.canary, cpu) = canary;
27334+#endif
27335+#endif
27336 /*
27337 * Up to this point, the boot CPU has been using .init.data
27338 * area. Reload any changed state for the boot CPU.
27339diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27340index ed37a76..39f936e 100644
27341--- a/arch/x86/kernel/signal.c
27342+++ b/arch/x86/kernel/signal.c
27343@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27344 * Align the stack pointer according to the i386 ABI,
27345 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27346 */
27347- sp = ((sp + 4) & -16ul) - 4;
27348+ sp = ((sp - 12) & -16ul) - 4;
27349 #else /* !CONFIG_X86_32 */
27350 sp = round_down(sp, 16) - 8;
27351 #endif
27352@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27353 }
27354
27355 if (current->mm->context.vdso)
27356- restorer = current->mm->context.vdso +
27357- selected_vdso32->sym___kernel_sigreturn;
27358+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27359 else
27360- restorer = &frame->retcode;
27361+ restorer = (void __user *)&frame->retcode;
27362 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27363 restorer = ksig->ka.sa.sa_restorer;
27364
27365@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27366 * reasons and because gdb uses it as a signature to notice
27367 * signal handler stack frames.
27368 */
27369- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27370+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27371
27372 if (err)
27373 return -EFAULT;
27374@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27375 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27376
27377 /* Set up to return from userspace. */
27378- restorer = current->mm->context.vdso +
27379- selected_vdso32->sym___kernel_rt_sigreturn;
27380+ if (current->mm->context.vdso)
27381+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27382+ else
27383+ restorer = (void __user *)&frame->retcode;
27384 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27385 restorer = ksig->ka.sa.sa_restorer;
27386 put_user_ex(restorer, &frame->pretcode);
27387@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27388 * reasons and because gdb uses it as a signature to notice
27389 * signal handler stack frames.
27390 */
27391- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27392+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27393 } put_user_catch(err);
27394
27395 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27396@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27397 {
27398 int usig = signr_convert(ksig->sig);
27399 sigset_t *set = sigmask_to_save();
27400- compat_sigset_t *cset = (compat_sigset_t *) set;
27401+ sigset_t sigcopy;
27402+ compat_sigset_t *cset;
27403+
27404+ sigcopy = *set;
27405+
27406+ cset = (compat_sigset_t *) &sigcopy;
27407
27408 /* Set up the stack frame */
27409 if (is_ia32_frame()) {
27410@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27411 } else if (is_x32_frame()) {
27412 return x32_setup_rt_frame(ksig, cset, regs);
27413 } else {
27414- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27415+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27416 }
27417 }
27418
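The align_sigframe() change earlier in this file preserves the i386 ABI invariant that (sp + 4) is 16-byte aligned on handler entry, but rounds strictly downward: ((sp + 4) & -16) - 4 can land on sp itself, while ((sp - 12) & -16) - 4 always descends by at least 16 bytes. A quick self-contained check of both properties:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 4096; sp < 4096 + 64; sp++) {
        unsigned long old_sp = ((sp + 4) & -16UL) - 4;
        unsigned long new_sp = ((sp - 12) & -16UL) - 4;

        /* both forms satisfy the ABI: (result + 4) is 16-byte aligned */
        assert(((old_sp + 4) & 15) == 0);
        assert(((new_sp + 4) & 15) == 0);

        /* the old form may stay put; the new form always moves down */
        assert(old_sp <= sp);
        assert(new_sp <= sp - 16);
    }
    puts("alignment invariants hold");
    return 0;
}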
27419diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27420index be8e1bd..a3d93fa 100644
27421--- a/arch/x86/kernel/smp.c
27422+++ b/arch/x86/kernel/smp.c
27423@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27424
27425 __setup("nonmi_ipi", nonmi_ipi_setup);
27426
27427-struct smp_ops smp_ops = {
27428+struct smp_ops smp_ops __read_only = {
27429 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27430 .smp_prepare_cpus = native_smp_prepare_cpus,
27431 .smp_cpus_done = native_smp_cpus_done,
27432diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27433index 6d7022c..4feb6be 100644
27434--- a/arch/x86/kernel/smpboot.c
27435+++ b/arch/x86/kernel/smpboot.c
27436@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27437
27438 enable_start_cpu0 = 0;
27439
27440-#ifdef CONFIG_X86_32
27441+ /* otherwise gcc will hoist smp_processor_id() above cpu_init() */
27442+ barrier();
27443+
27444 /* switch away from the initial page table */
27445+#ifdef CONFIG_PAX_PER_CPU_PGD
27446+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27447+#else
27448 load_cr3(swapper_pg_dir);
27449+#endif
27450 __flush_tlb_all();
27451-#endif
27452
27453- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27454- barrier();
27455 /*
27456 * Check TSC synchronization with the BP:
27457 */
27458@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27459 alternatives_enable_smp();
27460
27461 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27462- (THREAD_SIZE + task_stack_page(idle))) - 1);
27463+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27464 per_cpu(current_task, cpu) = idle;
27465+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27466
27467 #ifdef CONFIG_X86_32
27468 /* Stack for startup_32 can be just as for start_secondary onwards */
27469@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27470 clear_tsk_thread_flag(idle, TIF_FORK);
27471 initial_gs = per_cpu_offset(cpu);
27472 #endif
27473- per_cpu(kernel_stack, cpu) =
27474- (unsigned long)task_stack_page(idle) -
27475- KERNEL_STACK_OFFSET + THREAD_SIZE;
27476+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27477+ pax_open_kernel();
27478 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27479+ pax_close_kernel();
27480 initial_code = (unsigned long)start_secondary;
27481 stack_start = idle->thread.sp;
27482
27483@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27484 /* the FPU context is blank, nobody can own it */
27485 __cpu_disable_lazy_restore(cpu);
27486
27487+#ifdef CONFIG_PAX_PER_CPU_PGD
27488+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27489+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27490+ KERNEL_PGD_PTRS);
27491+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27492+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27493+ KERNEL_PGD_PTRS);
27494+#endif
27495+
27496 err = do_boot_cpu(apicid, cpu, tidle);
27497 if (err) {
27498 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27499diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27500index 9b4d51d..5d28b58 100644
27501--- a/arch/x86/kernel/step.c
27502+++ b/arch/x86/kernel/step.c
27503@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27504 struct desc_struct *desc;
27505 unsigned long base;
27506
27507- seg &= ~7UL;
27508+ seg >>= 3;
27509
27510 mutex_lock(&child->mm->context.lock);
27511- if (unlikely((seg >> 3) >= child->mm->context.size))
27512+ if (unlikely(seg >= child->mm->context.size))
27513 addr = -1L; /* bogus selector, access would fault */
27514 else {
27515 desc = child->mm->context.ldt + seg;
27516@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27517 addr += base;
27518 }
27519 mutex_unlock(&child->mm->context.lock);
27520- }
27521+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27522+ addr = ktla_ktva(addr);
27523
27524 return addr;
27525 }
27526@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27527 unsigned char opcode[15];
27528 unsigned long addr = convert_ip_to_linear(child, regs);
27529
27530+ if (addr == -1L) /* bogus selector flagged by convert_ip_to_linear() */
27531+ return 0;
27532+
27533 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27534 for (i = 0; i < copied; i++) {
27535 switch (opcode[i]) {
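In convert_ip_to_linear() the patch turns the selector into a descriptor index with a single shift (seg >>= 3) rather than keeping a byte offset (seg &= ~7UL) and shifting at each use. A small sketch of x86 selector anatomy showing the two forms agree on the index; the example selector value is arbitrary:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned short seg = 0x73;  /* arbitrary example selector */

    unsigned int index = seg >> 3;        /* descriptor table index */
    unsigned int ti    = (seg >> 2) & 1;  /* table indicator: 0=GDT, 1=LDT */
    unsigned int rpl   = seg & 3;         /* requested privilege level */

    /* the patched shift and the old mask-then-shift yield the same index */
    assert(index == (seg & ~7U) >> 3);

    printf("selector %#x: index=%u ti=%u rpl=%u\n", seg, index, ti, rpl);
    return 0;
}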
27536diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27537new file mode 100644
27538index 0000000..5877189
27539--- /dev/null
27540+++ b/arch/x86/kernel/sys_i386_32.c
27541@@ -0,0 +1,189 @@
27542+/*
27543+ * This file contains various random system calls that
27544+ * have a non-standard calling sequence on the Linux/i386
27545+ * platform.
27546+ */
27547+
27548+#include <linux/errno.h>
27549+#include <linux/sched.h>
27550+#include <linux/mm.h>
27551+#include <linux/fs.h>
27552+#include <linux/smp.h>
27553+#include <linux/sem.h>
27554+#include <linux/msg.h>
27555+#include <linux/shm.h>
27556+#include <linux/stat.h>
27557+#include <linux/syscalls.h>
27558+#include <linux/mman.h>
27559+#include <linux/file.h>
27560+#include <linux/utsname.h>
27561+#include <linux/ipc.h>
27562+#include <linux/elf.h>
27563+
27564+#include <linux/uaccess.h>
27565+#include <linux/unistd.h>
27566+
27567+#include <asm/syscalls.h>
27568+
27569+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27570+{
27571+ unsigned long pax_task_size = TASK_SIZE;
27572+
27573+#ifdef CONFIG_PAX_SEGMEXEC
27574+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27575+ pax_task_size = SEGMEXEC_TASK_SIZE;
27576+#endif
27577+
27578+ if (flags & MAP_FIXED)
27579+ if (len > pax_task_size || addr > pax_task_size - len)
27580+ return -EINVAL;
27581+
27582+ return 0;
27583+}
27584+
27585+/*
27586+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27587+ */
27588+static unsigned long get_align_mask(void)
27589+{
27590+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27591+ return 0;
27592+
27593+ if (!(current->flags & PF_RANDOMIZE))
27594+ return 0;
27595+
27596+ return va_align.mask;
27597+}
27598+
27599+unsigned long
27600+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27601+ unsigned long len, unsigned long pgoff, unsigned long flags)
27602+{
27603+ struct mm_struct *mm = current->mm;
27604+ struct vm_area_struct *vma;
27605+ unsigned long pax_task_size = TASK_SIZE;
27606+ struct vm_unmapped_area_info info;
27607+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27608+
27609+#ifdef CONFIG_PAX_SEGMEXEC
27610+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27611+ pax_task_size = SEGMEXEC_TASK_SIZE;
27612+#endif
27613+
27614+ pax_task_size -= PAGE_SIZE;
27615+
27616+ if (len > pax_task_size)
27617+ return -ENOMEM;
27618+
27619+ if (flags & MAP_FIXED)
27620+ return addr;
27621+
27622+#ifdef CONFIG_PAX_RANDMMAP
27623+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27624+#endif
27625+
27626+ if (addr) {
27627+ addr = PAGE_ALIGN(addr);
27628+ if (pax_task_size - len >= addr) {
27629+ vma = find_vma(mm, addr);
27630+ if (check_heap_stack_gap(vma, addr, len, offset))
27631+ return addr;
27632+ }
27633+ }
27634+
27635+ info.flags = 0;
27636+ info.length = len;
27637+ info.align_mask = filp ? get_align_mask() : 0;
27638+ info.align_offset = pgoff << PAGE_SHIFT;
27639+ info.threadstack_offset = offset;
27640+
27641+#ifdef CONFIG_PAX_PAGEEXEC
27642+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27643+ info.low_limit = 0x00110000UL;
27644+ info.high_limit = mm->start_code;
27645+
27646+#ifdef CONFIG_PAX_RANDMMAP
27647+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27648+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27649+#endif
27650+
27651+ if (info.low_limit < info.high_limit) {
27652+ addr = vm_unmapped_area(&info);
27653+ if (!IS_ERR_VALUE(addr))
27654+ return addr;
27655+ }
27656+ } else
27657+#endif
27658+
27659+ info.low_limit = mm->mmap_base;
27660+ info.high_limit = pax_task_size;
27661+
27662+ return vm_unmapped_area(&info);
27663+}
27664+
27665+unsigned long
27666+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27667+ const unsigned long len, const unsigned long pgoff,
27668+ const unsigned long flags)
27669+{
27670+ struct vm_area_struct *vma;
27671+ struct mm_struct *mm = current->mm;
27672+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27673+ struct vm_unmapped_area_info info;
27674+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27675+
27676+#ifdef CONFIG_PAX_SEGMEXEC
27677+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27678+ pax_task_size = SEGMEXEC_TASK_SIZE;
27679+#endif
27680+
27681+ pax_task_size -= PAGE_SIZE;
27682+
27683+ /* requested length too big for entire address space */
27684+ if (len > pax_task_size)
27685+ return -ENOMEM;
27686+
27687+ if (flags & MAP_FIXED)
27688+ return addr;
27689+
27690+#ifdef CONFIG_PAX_PAGEEXEC
27691+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27692+ goto bottomup;
27693+#endif
27694+
27695+#ifdef CONFIG_PAX_RANDMMAP
27696+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27697+#endif
27698+
27699+ /* requesting a specific address */
27700+ if (addr) {
27701+ addr = PAGE_ALIGN(addr);
27702+ if (pax_task_size - len >= addr) {
27703+ vma = find_vma(mm, addr);
27704+ if (check_heap_stack_gap(vma, addr, len, offset))
27705+ return addr;
27706+ }
27707+ }
27708+
27709+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27710+ info.length = len;
27711+ info.low_limit = PAGE_SIZE;
27712+ info.high_limit = mm->mmap_base;
27713+ info.align_mask = filp ? get_align_mask() : 0;
27714+ info.align_offset = pgoff << PAGE_SHIFT;
27715+ info.threadstack_offset = offset;
27716+
27717+ addr = vm_unmapped_area(&info);
27718+ if (!(addr & ~PAGE_MASK))
27719+ return addr;
27720+ VM_BUG_ON(addr != -ENOMEM);
27721+
27722+bottomup:
27723+ /*
27724+ * A failed mmap() very likely causes application failure,
27725+ * so fall back to the bottom-up function here. This scenario
27726+ * can happen with large stack limits and large mmap()
27727+ * allocations.
27728+ */
27729+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27730+}
27731diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27732index 30277e2..5664a29 100644
27733--- a/arch/x86/kernel/sys_x86_64.c
27734+++ b/arch/x86/kernel/sys_x86_64.c
27735@@ -81,8 +81,8 @@ out:
27736 return error;
27737 }
27738
27739-static void find_start_end(unsigned long flags, unsigned long *begin,
27740- unsigned long *end)
27741+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27742+ unsigned long *begin, unsigned long *end)
27743 {
27744 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27745 unsigned long new_begin;
27746@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27747 *begin = new_begin;
27748 }
27749 } else {
27750- *begin = current->mm->mmap_legacy_base;
27751+ *begin = mm->mmap_legacy_base;
27752 *end = TASK_SIZE;
27753 }
27754 }
27755@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27756 struct vm_area_struct *vma;
27757 struct vm_unmapped_area_info info;
27758 unsigned long begin, end;
27759+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27760
27761 if (flags & MAP_FIXED)
27762 return addr;
27763
27764- find_start_end(flags, &begin, &end);
27765+ find_start_end(mm, flags, &begin, &end);
27766
27767 if (len > end)
27768 return -ENOMEM;
27769
27770+#ifdef CONFIG_PAX_RANDMMAP
27771+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27772+#endif
27773+
27774 if (addr) {
27775 addr = PAGE_ALIGN(addr);
27776 vma = find_vma(mm, addr);
27777- if (end - len >= addr &&
27778- (!vma || addr + len <= vma->vm_start))
27779+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27780 return addr;
27781 }
27782
27783@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27784 info.high_limit = end;
27785 info.align_mask = filp ? get_align_mask() : 0;
27786 info.align_offset = pgoff << PAGE_SHIFT;
27787+ info.threadstack_offset = offset;
27788 return vm_unmapped_area(&info);
27789 }
27790
27791@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27792 struct mm_struct *mm = current->mm;
27793 unsigned long addr = addr0;
27794 struct vm_unmapped_area_info info;
27795+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27796
27797 /* requested length too big for entire address space */
27798 if (len > TASK_SIZE)
27799@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27800 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27801 goto bottomup;
27802
27803+#ifdef CONFIG_PAX_RANDMMAP
27804+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27805+#endif
27806+
27807 /* requesting a specific address */
27808 if (addr) {
27809 addr = PAGE_ALIGN(addr);
27810 vma = find_vma(mm, addr);
27811- if (TASK_SIZE - len >= addr &&
27812- (!vma || addr + len <= vma->vm_start))
27813+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27814 return addr;
27815 }
27816
27817@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27818 info.high_limit = mm->mmap_base;
27819 info.align_mask = filp ? get_align_mask() : 0;
27820 info.align_offset = pgoff << PAGE_SHIFT;
27821+ info.threadstack_offset = offset;
27822 addr = vm_unmapped_area(&info);
27823 if (!(addr & ~PAGE_MASK))
27824 return addr;
27825diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27826index 91a4496..bb87552 100644
27827--- a/arch/x86/kernel/tboot.c
27828+++ b/arch/x86/kernel/tboot.c
27829@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27830
27831 void tboot_shutdown(u32 shutdown_type)
27832 {
27833- void (*shutdown)(void);
27834+ void (* __noreturn shutdown)(void);
27835
27836 if (!tboot_enabled())
27837 return;
27838@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27839
27840 switch_to_tboot_pt();
27841
27842- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27843+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27844 shutdown();
27845
27846 /* should not reach here */
27847@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27848 return -ENODEV;
27849 }
27850
27851-static atomic_t ap_wfs_count;
27852+static atomic_unchecked_t ap_wfs_count;
27853
27854 static int tboot_wait_for_aps(int num_aps)
27855 {
27856@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27857 {
27858 switch (action) {
27859 case CPU_DYING:
27860- atomic_inc(&ap_wfs_count);
27861+ atomic_inc_unchecked(&ap_wfs_count);
27862 if (num_online_cpus() == 1)
27863- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27864+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27865 return NOTIFY_BAD;
27866 break;
27867 }
27868@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27869
27870 tboot_create_trampoline();
27871
27872- atomic_set(&ap_wfs_count, 0);
27873+ atomic_set_unchecked(&ap_wfs_count, 0);
27874 register_hotcpu_notifier(&tboot_cpu_notifier);
27875
27876 #ifdef CONFIG_DEBUG_FS
27877diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27878index 25adc0e..1df4349 100644
27879--- a/arch/x86/kernel/time.c
27880+++ b/arch/x86/kernel/time.c
27881@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27882 {
27883 unsigned long pc = instruction_pointer(regs);
27884
27885- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27886+ if (!user_mode(regs) && in_lock_functions(pc)) {
27887 #ifdef CONFIG_FRAME_POINTER
27888- return *(unsigned long *)(regs->bp + sizeof(long));
27889+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27890 #else
27891 unsigned long *sp =
27892 (unsigned long *)kernel_stack_pointer(regs);
27893@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27894 * or above a saved flags. EFLAGS has bits 22-31 zero;
27895 * kernel addresses don't.
27896 */
27897+
27898+#ifdef CONFIG_PAX_KERNEXEC
27899+ return ktla_ktva(sp[0]);
27900+#else
27901 if (sp[0] >> 22)
27902 return sp[0];
27903 if (sp[1] >> 22)
27904 return sp[1];
27905 #endif
27906+
27907+#endif
27908 }
27909 return pc;
27910 }
27911diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27912index 7fc5e84..c6e445a 100644
27913--- a/arch/x86/kernel/tls.c
27914+++ b/arch/x86/kernel/tls.c
27915@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27916 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27917 return -EINVAL;
27918
27919+#ifdef CONFIG_PAX_SEGMEXEC
27920+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27921+ return -EINVAL;
27922+#endif
27923+
27924 set_tls_desc(p, idx, &info, 1);
27925
27926 return 0;
27927@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27928
27929 if (kbuf)
27930 info = kbuf;
27931- else if (__copy_from_user(infobuf, ubuf, count))
27932+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27933 return -EFAULT;
27934 else
27935 info = infobuf;
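The regset_tls_set() hunk adds a length check ahead of __copy_from_user() so a caller-supplied count can never overflow the on-stack infobuf array. The same defensive shape in a standalone sketch; the trimmed struct and memcpy stand in for the real user_desc and the user-copy primitive:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* trimmed stand-in for the kernel's struct user_desc */
struct user_desc { unsigned int entry_number, base_addr, limit, flags; };

/* Copy at most the buffer's size; reject anything longer up front. */
static int bounded_copy(struct user_desc *infobuf, size_t nbuf,
                        const void *ubuf, size_t count)
{
    if (count > nbuf * sizeof(*infobuf))
        return -EFAULT;              /* would overflow the stack buffer */
    memcpy(infobuf, ubuf, count);    /* stand-in for __copy_from_user() */
    return 0;
}

int main(void)
{
    struct user_desc infobuf[4], src[4] = { { .entry_number = 6 } };

    printf("%d\n", bounded_copy(infobuf, 4, src, sizeof(src)));      /* 0 */
    printf("%d\n", bounded_copy(infobuf, 4, src, sizeof(src) + 1));  /* -EFAULT */
    return 0;
}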
27936diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27937index 1c113db..287b42e 100644
27938--- a/arch/x86/kernel/tracepoint.c
27939+++ b/arch/x86/kernel/tracepoint.c
27940@@ -9,11 +9,11 @@
27941 #include <linux/atomic.h>
27942
27943 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27944-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27945+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27946 (unsigned long) trace_idt_table };
27947
27948 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27949-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27950+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27951
27952 static int trace_irq_vector_refcount;
27953 static DEFINE_MUTEX(irq_vector_mutex);
27954diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27955index 89f4e64..aa4149d 100644
27956--- a/arch/x86/kernel/traps.c
27957+++ b/arch/x86/kernel/traps.c
27958@@ -68,7 +68,7 @@
27959 #include <asm/proto.h>
27960
27961 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27962-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27963+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27964 #else
27965 #include <asm/processor-flags.h>
27966 #include <asm/setup.h>
27967@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27968 #endif
27969
27970 /* Must be page-aligned because the real IDT is used in a fixmap. */
27971-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27972+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27973
27974 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27975 EXPORT_SYMBOL_GPL(used_vectors);
27976@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27977 }
27978
27979 static nokprobe_inline int
27980-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27981+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27982 struct pt_regs *regs, long error_code)
27983 {
27984 #ifdef CONFIG_X86_32
27985- if (regs->flags & X86_VM_MASK) {
27986+ if (v8086_mode(regs)) {
27987 /*
27988 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27989 * On nmi (interrupt 2), do_trap should not be called.
27990@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27991 return -1;
27992 }
27993 #endif
27994- if (!user_mode(regs)) {
27995+ if (!user_mode_novm(regs)) {
27996 if (!fixup_exception(regs)) {
27997 tsk->thread.error_code = error_code;
27998 tsk->thread.trap_nr = trapnr;
27999+
28000+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28001+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28002+ str = "PAX: suspicious stack segment fault";
28003+#endif
28004+
28005 die(str, regs, error_code);
28006 }
28007+
28008+#ifdef CONFIG_PAX_REFCOUNT
28009+ if (trapnr == X86_TRAP_OF)
28010+ pax_report_refcount_overflow(regs);
28011+#endif
28012+
28013 return 0;
28014 }
28015
28016@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28017 }
28018
28019 static void
28020-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28021+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28022 long error_code, siginfo_t *info)
28023 {
28024 struct task_struct *tsk = current;
28025@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28026 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28027 printk_ratelimit()) {
28028 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28029- tsk->comm, tsk->pid, str,
28030+ tsk->comm, task_pid_nr(tsk), str,
28031 regs->ip, regs->sp, error_code);
28032 print_vma_addr(" in ", regs->ip);
28033 pr_cont("\n");
28034@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28035 tsk->thread.error_code = error_code;
28036 tsk->thread.trap_nr = X86_TRAP_DF;
28037
28038+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28039+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28040+ die("grsec: kernel stack overflow detected", regs, error_code);
28041+#endif
28042+
28043 #ifdef CONFIG_DOUBLEFAULT
28044 df_debug(regs, error_code);
28045 #endif
28046@@ -300,7 +317,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
28047 goto exit;
28048 conditional_sti(regs);
28049
28050- if (!user_mode_vm(regs))
28051+ if (!user_mode(regs))
28052 die("bounds", regs, error_code);
28053
28054 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
28055@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28056 conditional_sti(regs);
28057
28058 #ifdef CONFIG_X86_32
28059- if (regs->flags & X86_VM_MASK) {
28060+ if (v8086_mode(regs)) {
28061 local_irq_enable();
28062 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28063 goto exit;
28064@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28065 #endif
28066
28067 tsk = current;
28068- if (!user_mode(regs)) {
28069+ if (!user_mode_novm(regs)) {
28070 if (fixup_exception(regs))
28071 goto exit;
28072
28073 tsk->thread.error_code = error_code;
28074 tsk->thread.trap_nr = X86_TRAP_GP;
28075 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28076- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28077+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28078+
28079+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28080+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28081+ die("PAX: suspicious general protection fault", regs, error_code);
28082+ else
28083+#endif
28084+
28085 die("general protection fault", regs, error_code);
28086+ }
28087 goto exit;
28088 }
28089
28090+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28091+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28092+ struct mm_struct *mm = tsk->mm;
28093+ unsigned long limit;
28094+
28095+ down_write(&mm->mmap_sem);
28096+ limit = mm->context.user_cs_limit;
28097+ if (limit < TASK_SIZE) {
28098+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28099+ up_write(&mm->mmap_sem);
28100+ return;
28101+ }
28102+ up_write(&mm->mmap_sem);
28103+ }
28104+#endif
28105+
28106 tsk->thread.error_code = error_code;
28107 tsk->thread.trap_nr = X86_TRAP_GP;
28108
28109@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28110 container_of(task_pt_regs(current),
28111 struct bad_iret_stack, regs);
28112
28113+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28114+ new_stack = s;
28115+
28116 /* Copy the IRET target to the new stack. */
28117 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28118
28119 /* Copy the remainder of the stack from the current stack. */
28120 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28121
28122- BUG_ON(!user_mode_vm(&new_stack->regs));
28123+ BUG_ON(!user_mode(&new_stack->regs));
28124 return new_stack;
28125 }
28126 NOKPROBE_SYMBOL(fixup_bad_iret);
28127@@ -566,7 +610,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28128 * then it's very likely the result of an icebp/int01 trap.
28129 * User wants a sigtrap for that.
28130 */
28131- if (!dr6 && user_mode_vm(regs))
28132+ if (!dr6 && user_mode(regs))
28133 user_icebp = 1;
28134
28135 /* Catch kmemcheck conditions first of all! */
28136@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28137 /* It's safe to allow irq's after DR6 has been saved */
28138 preempt_conditional_sti(regs);
28139
28140- if (regs->flags & X86_VM_MASK) {
28141+ if (v8086_mode(regs)) {
28142 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28143 X86_TRAP_DB);
28144 preempt_conditional_cli(regs);
28145@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28146 * We already checked v86 mode above, so we can check for kernel mode
28147 * by just checking the CPL of CS.
28148 */
28149- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28150+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28151 tsk->thread.debugreg6 &= ~DR_STEP;
28152 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28153 regs->flags &= ~X86_EFLAGS_TF;
28154@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28155 return;
28156 conditional_sti(regs);
28157
28158- if (!user_mode_vm(regs))
28159+ if (!user_mode(regs))
28160 {
28161 if (!fixup_exception(regs)) {
28162 task->thread.error_code = error_code;
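Among the traps.c changes, the double-fault hunk flags a kernel stack overflow when the faulting sp has run within one page below the stack's base: tsk->stack is the lowest address of the stack allocation, so the unsigned difference base - sp only stays small once sp has just underflowed it. A sketch of that heuristic with stand-in constants:

#include <stdio.h>

#define PAGE_SIZE   4096UL   /* stand-in values */
#define THREAD_SIZE 8192UL

/* True once sp sits within one page below the base of the stack. */
static int stack_overflowed(unsigned long stack_base, unsigned long sp)
{
    return stack_base - sp <= PAGE_SIZE;  /* unsigned wrap is intended */
}

int main(void)
{
    unsigned long base = 0x100000;

    /* sp inside the stack: base - sp wraps to a huge value, no hit */
    printf("%d\n", stack_overflowed(base, base + THREAD_SIZE / 2));  /* 0 */
    /* sp just ran past the bottom of the stack: overflow detected */
    printf("%d\n", stack_overflowed(base, base - 64));               /* 1 */
    return 0;
}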
28163diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28164index 5054497..139f8f8 100644
28165--- a/arch/x86/kernel/tsc.c
28166+++ b/arch/x86/kernel/tsc.c
28167@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28168 */
28169 smp_wmb();
28170
28171- ACCESS_ONCE(c2n->head) = data;
28172+ ACCESS_ONCE_RW(c2n->head) = data;
28173 }
28174
28175 /*
28176diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28177index 8b96a94..792b410 100644
28178--- a/arch/x86/kernel/uprobes.c
28179+++ b/arch/x86/kernel/uprobes.c
28180@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28181 int ret = NOTIFY_DONE;
28182
28183 /* We are only interested in userspace traps */
28184- if (regs && !user_mode_vm(regs))
28185+ if (regs && !user_mode(regs))
28186 return NOTIFY_DONE;
28187
28188 switch (val) {
28189@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28190
28191 if (nleft != rasize) {
28192 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28193- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28194+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28195
28196 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28197 }
28198diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28199index b9242ba..50c5edd 100644
28200--- a/arch/x86/kernel/verify_cpu.S
28201+++ b/arch/x86/kernel/verify_cpu.S
28202@@ -20,6 +20,7 @@
28203 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28204 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28205 * arch/x86/kernel/head_32.S: processor startup
28206+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28207 *
28208 * verify_cpu, returns the status of longmode and SSE in register %eax.
28209 * 0: Success 1: Failure
28210diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28211index e8edcf5..27f9344 100644
28212--- a/arch/x86/kernel/vm86_32.c
28213+++ b/arch/x86/kernel/vm86_32.c
28214@@ -44,6 +44,7 @@
28215 #include <linux/ptrace.h>
28216 #include <linux/audit.h>
28217 #include <linux/stddef.h>
28218+#include <linux/grsecurity.h>
28219
28220 #include <asm/uaccess.h>
28221 #include <asm/io.h>
28222@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28223 do_exit(SIGSEGV);
28224 }
28225
28226- tss = &per_cpu(init_tss, get_cpu());
28227+ tss = init_tss + get_cpu();
28228 current->thread.sp0 = current->thread.saved_sp0;
28229 current->thread.sysenter_cs = __KERNEL_CS;
28230 load_sp0(tss, &current->thread);
28231@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28232
28233 if (tsk->thread.saved_sp0)
28234 return -EPERM;
28235+
28236+#ifdef CONFIG_GRKERNSEC_VM86
28237+ if (!capable(CAP_SYS_RAWIO)) {
28238+ gr_handle_vm86();
28239+ return -EPERM;
28240+ }
28241+#endif
28242+
28243 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28244 offsetof(struct kernel_vm86_struct, vm86plus) -
28245 sizeof(info.regs));
28246@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28247 int tmp;
28248 struct vm86plus_struct __user *v86;
28249
28250+#ifdef CONFIG_GRKERNSEC_VM86
28251+ if (!capable(CAP_SYS_RAWIO)) {
28252+ gr_handle_vm86();
28253+ return -EPERM;
28254+ }
28255+#endif
28256+
28257 tsk = current;
28258 switch (cmd) {
28259 case VM86_REQUEST_IRQ:
28260@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28261 tsk->thread.saved_fs = info->regs32->fs;
28262 tsk->thread.saved_gs = get_user_gs(info->regs32);
28263
28264- tss = &per_cpu(init_tss, get_cpu());
28265+ tss = init_tss + get_cpu();
28266 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28267 if (cpu_has_sep)
28268 tsk->thread.sysenter_cs = 0;
28269@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28270 goto cannot_handle;
28271 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28272 goto cannot_handle;
28273- intr_ptr = (unsigned long __user *) (i << 2);
28274+ intr_ptr = (__force unsigned long __user *) (i << 2);
28275 if (get_user(segoffs, intr_ptr))
28276 goto cannot_handle;
28277 if ((segoffs >> 16) == BIOSSEG)
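Both vm86 entry points above are gated on CAP_SYS_RAWIO when GRKERNSEC_VM86 is enabled: unprivileged callers get the attempt logged via gr_handle_vm86() and the legacy interface refused. A sketch of that capability-gating shape; capable() and the audit hook are mocked here, not the kernel's implementations:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool has_sys_rawio;        /* mock of capable(CAP_SYS_RAWIO) */

static void gr_handle_vm86(void)  /* mock of the grsecurity audit hook */
{
    fprintf(stderr, "denied vm86 for unprivileged caller\n");
}

static long sys_vm86(unsigned long cmd)
{
    (void)cmd;
    if (!has_sys_rawio) {
        gr_handle_vm86();  /* log the attempt... */
        return -EPERM;     /* ...then refuse the legacy interface */
    }
    /* a privileged caller would reach the real vm86 setup here */
    return 0;
}

int main(void)
{
    printf("%ld\n", sys_vm86(0));  /* -EPERM */
    has_sys_rawio = true;
    printf("%ld\n", sys_vm86(0));  /* 0 */
    return 0;
}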
28278diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28279index 00bf300..129df8e 100644
28280--- a/arch/x86/kernel/vmlinux.lds.S
28281+++ b/arch/x86/kernel/vmlinux.lds.S
28282@@ -26,6 +26,13 @@
28283 #include <asm/page_types.h>
28284 #include <asm/cache.h>
28285 #include <asm/boot.h>
28286+#include <asm/segment.h>
28287+
28288+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28289+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28290+#else
28291+#define __KERNEL_TEXT_OFFSET 0
28292+#endif
28293
28294 #undef i386 /* in case the preprocessor is a 32bit one */
28295
28296@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28297
28298 PHDRS {
28299 text PT_LOAD FLAGS(5); /* R_E */
28300+#ifdef CONFIG_X86_32
28301+ module PT_LOAD FLAGS(5); /* R_E */
28302+#endif
28303+#ifdef CONFIG_XEN
28304+ rodata PT_LOAD FLAGS(5); /* R_E */
28305+#else
28306+ rodata PT_LOAD FLAGS(4); /* R__ */
28307+#endif
28308 data PT_LOAD FLAGS(6); /* RW_ */
28309-#ifdef CONFIG_X86_64
28310+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28311 #ifdef CONFIG_SMP
28312 percpu PT_LOAD FLAGS(6); /* RW_ */
28313 #endif
28314+ text.init PT_LOAD FLAGS(5); /* R_E */
28315+ text.exit PT_LOAD FLAGS(5); /* R_E */
28316 init PT_LOAD FLAGS(7); /* RWE */
28317-#endif
28318 note PT_NOTE FLAGS(0); /* ___ */
28319 }
28320
28321 SECTIONS
28322 {
28323 #ifdef CONFIG_X86_32
28324- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28325- phys_startup_32 = startup_32 - LOAD_OFFSET;
28326+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28327 #else
28328- . = __START_KERNEL;
28329- phys_startup_64 = startup_64 - LOAD_OFFSET;
28330+ . = __START_KERNEL;
28331 #endif
28332
28333 /* Text and read-only data */
28334- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28335- _text = .;
28336+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28337 /* bootstrapping code */
28338+#ifdef CONFIG_X86_32
28339+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28340+#else
28341+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28342+#endif
28343+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28344+ _text = .;
28345 HEAD_TEXT
28346 . = ALIGN(8);
28347 _stext = .;
28348@@ -104,13 +124,47 @@ SECTIONS
28349 IRQENTRY_TEXT
28350 *(.fixup)
28351 *(.gnu.warning)
28352- /* End of text section */
28353- _etext = .;
28354 } :text = 0x9090
28355
28356- NOTES :text :note
28357+ . += __KERNEL_TEXT_OFFSET;
28358
28359- EXCEPTION_TABLE(16) :text = 0x9090
28360+#ifdef CONFIG_X86_32
28361+ . = ALIGN(PAGE_SIZE);
28362+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28363+
28364+#ifdef CONFIG_PAX_KERNEXEC
28365+ MODULES_EXEC_VADDR = .;
28366+ BYTE(0)
28367+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28368+ . = ALIGN(HPAGE_SIZE) - 1;
28369+ MODULES_EXEC_END = .;
28370+#endif
28371+
28372+ } :module
28373+#endif
28374+
28375+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28376+ /* End of text section */
28377+ BYTE(0)
28378+ _etext = . - __KERNEL_TEXT_OFFSET;
28379+ }
28380+
28381+#ifdef CONFIG_X86_32
28382+ . = ALIGN(PAGE_SIZE);
28383+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28384+ . = ALIGN(PAGE_SIZE);
28385+ *(.empty_zero_page)
28386+ *(.initial_pg_fixmap)
28387+ *(.initial_pg_pmd)
28388+ *(.initial_page_table)
28389+ *(.swapper_pg_dir)
28390+ } :rodata
28391+#endif
28392+
28393+ . = ALIGN(PAGE_SIZE);
28394+ NOTES :rodata :note
28395+
28396+ EXCEPTION_TABLE(16) :rodata
28397
28398 #if defined(CONFIG_DEBUG_RODATA)
28399 /* .text should occupy whole number of pages */
28400@@ -122,16 +176,20 @@ SECTIONS
28401
28402 /* Data */
28403 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28404+
28405+#ifdef CONFIG_PAX_KERNEXEC
28406+ . = ALIGN(HPAGE_SIZE);
28407+#else
28408+ . = ALIGN(PAGE_SIZE);
28409+#endif
28410+
28411 /* Start of data section */
28412 _sdata = .;
28413
28414 /* init_task */
28415 INIT_TASK_DATA(THREAD_SIZE)
28416
28417-#ifdef CONFIG_X86_32
28418- /* 32 bit has nosave before _edata */
28419 NOSAVE_DATA
28420-#endif
28421
28422 PAGE_ALIGNED_DATA(PAGE_SIZE)
28423
28424@@ -174,12 +232,19 @@ SECTIONS
28425 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28426
28427 /* Init code and data - will be freed after init */
28428- . = ALIGN(PAGE_SIZE);
28429 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28430+ BYTE(0)
28431+
28432+#ifdef CONFIG_PAX_KERNEXEC
28433+ . = ALIGN(HPAGE_SIZE);
28434+#else
28435+ . = ALIGN(PAGE_SIZE);
28436+#endif
28437+
28438 __init_begin = .; /* paired with __init_end */
28439- }
28440+ } :init.begin
28441
28442-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28443+#ifdef CONFIG_SMP
28444 /*
28445 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28446 * output PHDR, so the next output section - .init.text - should
28447@@ -190,12 +255,27 @@ SECTIONS
28448 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28449 #endif
28450
28451- INIT_TEXT_SECTION(PAGE_SIZE)
28452-#ifdef CONFIG_X86_64
28453- :init
28454-#endif
28455+ . = ALIGN(PAGE_SIZE);
28456+ init_begin = .;
28457+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28458+ VMLINUX_SYMBOL(_sinittext) = .;
28459+ INIT_TEXT
28460+ . = ALIGN(PAGE_SIZE);
28461+ } :text.init
28462
28463- INIT_DATA_SECTION(16)
28464+ /*
28465+ * .exit.text is discarded at runtime, not link time, to deal with
28466+ * references from .altinstructions and .eh_frame
28467+ */
28468+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28469+ EXIT_TEXT
28470+ VMLINUX_SYMBOL(_einittext) = .;
28471+ . = ALIGN(16);
28472+ } :text.exit
28473+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28474+
28475+ . = ALIGN(PAGE_SIZE);
28476+ INIT_DATA_SECTION(16) :init
28477
28478 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28479 __x86_cpu_dev_start = .;
28480@@ -266,19 +346,12 @@ SECTIONS
28481 }
28482
28483 . = ALIGN(8);
28484- /*
28485- * .exit.text is discard at runtime, not link time, to deal with
28486- * references from .altinstructions and .eh_frame
28487- */
28488- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28489- EXIT_TEXT
28490- }
28491
28492 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28493 EXIT_DATA
28494 }
28495
28496-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28497+#ifndef CONFIG_SMP
28498 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28499 #endif
28500
28501@@ -297,16 +370,10 @@ SECTIONS
28502 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28503 __smp_locks = .;
28504 *(.smp_locks)
28505- . = ALIGN(PAGE_SIZE);
28506 __smp_locks_end = .;
28507+ . = ALIGN(PAGE_SIZE);
28508 }
28509
28510-#ifdef CONFIG_X86_64
28511- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28512- NOSAVE_DATA
28513- }
28514-#endif
28515-
28516 /* BSS */
28517 . = ALIGN(PAGE_SIZE);
28518 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28519@@ -322,6 +389,7 @@ SECTIONS
28520 __brk_base = .;
28521 . += 64 * 1024; /* 64k alignment slop space */
28522 *(.brk_reservation) /* areas brk users have reserved */
28523+ . = ALIGN(HPAGE_SIZE);
28524 __brk_limit = .;
28525 }
28526
28527@@ -348,13 +416,12 @@ SECTIONS
28528 * for the boot processor.
28529 */
28530 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28531-INIT_PER_CPU(gdt_page);
28532 INIT_PER_CPU(irq_stack_union);
28533
28534 /*
28535 * Build-time check on the image size:
28536 */
28537-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28538+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28539 "kernel image bigger than KERNEL_IMAGE_SIZE");
28540
28541 #ifdef CONFIG_SMP
28542diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28543index 2dcc6ff..082dc7a 100644
28544--- a/arch/x86/kernel/vsyscall_64.c
28545+++ b/arch/x86/kernel/vsyscall_64.c
28546@@ -38,15 +38,13 @@
28547 #define CREATE_TRACE_POINTS
28548 #include "vsyscall_trace.h"
28549
28550-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28551+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28552
28553 static int __init vsyscall_setup(char *str)
28554 {
28555 if (str) {
28556 if (!strcmp("emulate", str))
28557 vsyscall_mode = EMULATE;
28558- else if (!strcmp("native", str))
28559- vsyscall_mode = NATIVE;
28560 else if (!strcmp("none", str))
28561 vsyscall_mode = NONE;
28562 else
28563@@ -264,8 +262,7 @@ do_ret:
28564 return true;
28565
28566 sigsegv:
28567- force_sig(SIGSEGV, current);
28568- return true;
28569+ do_group_exit(SIGKILL);
28570 }
28571
28572 /*
28573@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28574 static struct vm_area_struct gate_vma = {
28575 .vm_start = VSYSCALL_ADDR,
28576 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28577- .vm_page_prot = PAGE_READONLY_EXEC,
28578- .vm_flags = VM_READ | VM_EXEC,
28579+ .vm_page_prot = PAGE_READONLY,
28580+ .vm_flags = VM_READ,
28581 .vm_ops = &gate_vma_ops,
28582 };
28583
28584@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28585 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28586
28587 if (vsyscall_mode != NONE)
28588- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28589- vsyscall_mode == NATIVE
28590- ? PAGE_KERNEL_VSYSCALL
28591- : PAGE_KERNEL_VVAR);
28592+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28593
28594 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28595 (unsigned long)VSYSCALL_ADDR);
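The vsyscall_64.c hunks remove the "native" mode outright: the mode enum and the vsyscall= parser lose the NATIVE case, map_vsyscall() always installs the page as PAGE_KERNEL_VVAR (non-executable), the gate VMA drops VM_EXEC, and a failed emulation now ends the process with do_group_exit(SIGKILL) instead of a catchable SIGSEGV. A sketch of the reduced parser, restructured slightly but using the file's own names:

    #include <linux/init.h>
    #include <linux/string.h>

    static enum { EMULATE, NONE } vsyscall_mode = EMULATE;

    static int __init vsyscall_setup(char *str)
    {
            if (!str)
                    return -EINVAL;
            if (!strcmp("emulate", str))
                    vsyscall_mode = EMULATE;
            else if (!strcmp("none", str))
                    vsyscall_mode = NONE;
            else
                    return -EINVAL; /* "native" is deliberately rejected now */
            return 0;
    }
    early_param("vsyscall", vsyscall_setup);

The rationale: an executable page at a fixed, universally known address is a ready-made ROP target, and emulation preserves the legacy ABI without it.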
28596diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28597index 04068192..4d75aa6 100644
28598--- a/arch/x86/kernel/x8664_ksyms_64.c
28599+++ b/arch/x86/kernel/x8664_ksyms_64.c
28600@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28601 EXPORT_SYMBOL(copy_user_generic_unrolled);
28602 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28603 EXPORT_SYMBOL(__copy_user_nocache);
28604-EXPORT_SYMBOL(_copy_from_user);
28605-EXPORT_SYMBOL(_copy_to_user);
28606
28607 EXPORT_SYMBOL(copy_page);
28608 EXPORT_SYMBOL(clear_page);
28609@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28610 EXPORT_SYMBOL(___preempt_schedule_context);
28611 #endif
28612 #endif
28613+
28614+#ifdef CONFIG_PAX_PER_CPU_PGD
28615+EXPORT_SYMBOL(cpu_pgd);
28616+#endif
28617diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28618index 234b072..b7ab191 100644
28619--- a/arch/x86/kernel/x86_init.c
28620+++ b/arch/x86/kernel/x86_init.c
28621@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28622 static void default_nmi_init(void) { };
28623 static int default_i8042_detect(void) { return 1; };
28624
28625-struct x86_platform_ops x86_platform = {
28626+struct x86_platform_ops x86_platform __read_only = {
28627 .calibrate_tsc = native_calibrate_tsc,
28628 .get_wallclock = mach_get_cmos_time,
28629 .set_wallclock = mach_set_rtc_mmss,
28630@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28631 EXPORT_SYMBOL_GPL(x86_platform);
28632
28633 #if defined(CONFIG_PCI_MSI)
28634-struct x86_msi_ops x86_msi = {
28635+struct x86_msi_ops x86_msi __read_only = {
28636 .setup_msi_irqs = native_setup_msi_irqs,
28637 .compose_msi_msg = native_compose_msi_msg,
28638 .teardown_msi_irq = native_teardown_msi_irq,
28639@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28640 }
28641 #endif
28642
28643-struct x86_io_apic_ops x86_io_apic_ops = {
28644+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28645 .init = native_io_apic_init_mappings,
28646 .read = native_io_apic_read,
28647 .write = native_io_apic_write,
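x86_init.c marks x86_platform, x86_msi and x86_io_apic_ops __read_only, so these function-pointer tables leave writable data and can no longer be retargeted by a stray kernel write. The price is that every remaining legitimate write must be bracketed with pax_open_kernel()/pax_close_kernel(), as the svm.c and vmx.c hunks below do. A hedged sketch of that pattern, assuming pax_open_kernel() temporarily lifts the write protection on read-only kernel data:

    /* sketch: one-time patching of an ops table that is normally read-only;
     * kvm_x86_ops and the pax_open_kernel()/pax_close_kernel() pair are as
     * used in the vmx.c hunks below */
    static void __init drop_cr8_intercept(void)
    {
            pax_open_kernel();                      /* allow RO-data writes */
            *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
            pax_close_kernel();                     /* re-protect */
    }

The *(void **)& cast writes through a member that the patch otherwise leaves const-qualified.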
28648diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28649index 8be1e17..07dd990 100644
28650--- a/arch/x86/kernel/xsave.c
28651+++ b/arch/x86/kernel/xsave.c
28652@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28653
28654 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28655 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28656- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28657+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28658
28659 if (!use_xsave())
28660 return err;
28661
28662- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28663+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28664
28665 /*
28666 * Read the xstate_bv which we copied (directly from the cpu or
28667 * from the state in task struct) to the user buffers.
28668 */
28669- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28670+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28671
28672 /*
28673 * For legacy compatible, we always set FP/SSE bits in the bit
28674@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28675 */
28676 xstate_bv |= XSTATE_FPSSE;
28677
28678- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28679+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28680
28681 return err;
28682 }
28683@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28684 {
28685 int err;
28686
28687+ buf = (struct xsave_struct __user *)____m(buf);
28688 if (use_xsave())
28689 err = xsave_user(buf);
28690 else if (use_fxsr())
28691@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28692 */
28693 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28694 {
28695+ buf = (void __user *)____m(buf);
28696 if (use_xsave()) {
28697 if ((unsigned long)buf % 64 || fx_only) {
28698 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28699diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28700index 8a80737..bac4961 100644
28701--- a/arch/x86/kvm/cpuid.c
28702+++ b/arch/x86/kvm/cpuid.c
28703@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28704 struct kvm_cpuid2 *cpuid,
28705 struct kvm_cpuid_entry2 __user *entries)
28706 {
28707- int r;
28708+ int r, i;
28709
28710 r = -E2BIG;
28711 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28712 goto out;
28713 r = -EFAULT;
28714- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28715- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28716+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28717 goto out;
28718+ for (i = 0; i < cpuid->nent; ++i) {
28719+ struct kvm_cpuid_entry2 cpuid_entry;
28720+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28721+ goto out;
28722+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28723+ }
28724 vcpu->arch.cpuid_nent = cpuid->nent;
28725 kvm_apic_set_version(vcpu);
28726 kvm_x86_ops->cpuid_update(vcpu);
28727@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28728 struct kvm_cpuid2 *cpuid,
28729 struct kvm_cpuid_entry2 __user *entries)
28730 {
28731- int r;
28732+ int r, i;
28733
28734 r = -E2BIG;
28735 if (cpuid->nent < vcpu->arch.cpuid_nent)
28736 goto out;
28737 r = -EFAULT;
28738- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28739- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28740+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28741 goto out;
28742+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28743+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28744+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28745+ goto out;
28746+ }
28747 return 0;
28748
28749 out:
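Both cpuid ioctl paths stop doing one bulk copy whose size is the attacker-influenced product nent * sizeof(entry); instead they validate the whole user range with access_ok() once and then move one fixed-size entry per iteration with __copy_from_user()/__copy_to_user(). A kernel-style sketch of the read side (types come from the KVM headers):

    #include <linux/uaccess.h>

    /* sketch: bounded, per-element copy-in of a user-supplied array */
    static int copy_cpuid_entries(struct kvm_cpuid_entry2 *dst,
                                  const struct kvm_cpuid_entry2 __user *src,
                                  u32 nent)
    {
            u32 i;

            if (nent > KVM_MAX_CPUID_ENTRIES)
                    return -E2BIG;
            if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
                    return -EFAULT;
            for (i = 0; i < nent; i++)
                    if (__copy_from_user(&dst[i], &src[i], sizeof(dst[i])))
                            return -EFAULT;
            return 0;
    }

The per-element loop also bounds the kernel-side writes by the loop counter rather than by a multiplication.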
28750diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28751index b24c2d8..e1e4e259 100644
28752--- a/arch/x86/kvm/emulate.c
28753+++ b/arch/x86/kvm/emulate.c
28754@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28755 int cr = ctxt->modrm_reg;
28756 u64 efer = 0;
28757
28758- static u64 cr_reserved_bits[] = {
28759+ static const u64 cr_reserved_bits[] = {
28760 0xffffffff00000000ULL,
28761 0, 0, 0, /* CR3 checked later */
28762 CR4_RESERVED_BITS,
28763diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28764index d52dcf0..cec7e84 100644
28765--- a/arch/x86/kvm/lapic.c
28766+++ b/arch/x86/kvm/lapic.c
28767@@ -55,7 +55,7 @@
28768 #define APIC_BUS_CYCLE_NS 1
28769
28770 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28771-#define apic_debug(fmt, arg...)
28772+#define apic_debug(fmt, arg...) do {} while (0)
28773
28774 #define APIC_LVT_NUM 6
28775 /* 14 is the version for Xeon and Pentium 8.4.8*/
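Redefining the disabled apic_debug() as `do {} while (0)` instead of an empty expansion keeps it a real statement wherever it is used. A self-contained illustration of the difference:

    #include <stdio.h>

    #define dbg_empty(fmt, ...)                     /* expands to nothing */
    #define dbg_stmt(fmt, ...) do {} while (0)      /* one empty statement */

    int main(void)
    {
            int fail = 0;

            if (fail)
                    dbg_empty("oops\n");    /* becomes `if (fail) ;` -- gcc's
                                               -Wempty-body flags this */
            if (fail)
                    dbg_stmt("oops\n");     /* a genuine statement: no warning,
                                               and it behaves in statement
                                               position exactly like the
                                               enabled printk() variant */
            return 0;
    }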
28776diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28777index fd49c86..77e1aa0 100644
28778--- a/arch/x86/kvm/paging_tmpl.h
28779+++ b/arch/x86/kvm/paging_tmpl.h
28780@@ -343,7 +343,7 @@ retry_walk:
28781 if (unlikely(kvm_is_error_hva(host_addr)))
28782 goto error;
28783
28784- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28785+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28786 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28787 goto error;
28788 walker->ptep_user[walker->level - 1] = ptep_user;
28789diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28790index 41dd038..de331cf 100644
28791--- a/arch/x86/kvm/svm.c
28792+++ b/arch/x86/kvm/svm.c
28793@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28794 int cpu = raw_smp_processor_id();
28795
28796 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28797+
28798+ pax_open_kernel();
28799 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28800+ pax_close_kernel();
28801+
28802 load_TR_desc();
28803 }
28804
28805@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28806 #endif
28807 #endif
28808
28809+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28810+ __set_fs(current_thread_info()->addr_limit);
28811+#endif
28812+
28813 reload_tss(vcpu);
28814
28815 local_irq_disable();
28816diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28817index d4c58d8..eaf2568 100644
28818--- a/arch/x86/kvm/vmx.c
28819+++ b/arch/x86/kvm/vmx.c
28820@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28821 #endif
28822 }
28823
28824-static void vmcs_clear_bits(unsigned long field, u32 mask)
28825+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28826 {
28827 vmcs_writel(field, vmcs_readl(field) & ~mask);
28828 }
28829
28830-static void vmcs_set_bits(unsigned long field, u32 mask)
28831+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28832 {
28833 vmcs_writel(field, vmcs_readl(field) | mask);
28834 }
28835@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28836 struct desc_struct *descs;
28837
28838 descs = (void *)gdt->address;
28839+
28840+ pax_open_kernel();
28841 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28842+ pax_close_kernel();
28843+
28844 load_TR_desc();
28845 }
28846
28847@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28848 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28849 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28850
28851+#ifdef CONFIG_PAX_PER_CPU_PGD
28852+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28853+#endif
28854+
28855 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28856 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28857 vmx->loaded_vmcs->cpu = cpu;
28858@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28859 * reads and returns guest's timestamp counter "register"
28860 * guest_tsc = host_tsc + tsc_offset -- 21.3
28861 */
28862-static u64 guest_read_tsc(void)
28863+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28864 {
28865 u64 host_tsc, tsc_offset;
28866
28867@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28868 unsigned long cr4;
28869
28870 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28871+
28872+#ifndef CONFIG_PAX_PER_CPU_PGD
28873 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28874+#endif
28875
28876 /* Save the most likely value for this task's CR4 in the VMCS. */
28877 cr4 = read_cr4();
28878@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28879 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28880 vmx->host_idt_base = dt.address;
28881
28882- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28883+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28884
28885 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28886 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28887@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28888 * page upon invalidation. No need to do anything if the
28889 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28890 */
28891- kvm_x86_ops->set_apic_access_page_addr = NULL;
28892+ pax_open_kernel();
28893+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28894+ pax_close_kernel();
28895 }
28896
28897- if (!cpu_has_vmx_tpr_shadow())
28898- kvm_x86_ops->update_cr8_intercept = NULL;
28899+ if (!cpu_has_vmx_tpr_shadow()) {
28900+ pax_open_kernel();
28901+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28902+ pax_close_kernel();
28903+ }
28904
28905 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28906 kvm_disable_largepages();
28907@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28908 if (!cpu_has_vmx_apicv())
28909 enable_apicv = 0;
28910
28911+ pax_open_kernel();
28912 if (enable_apicv)
28913- kvm_x86_ops->update_cr8_intercept = NULL;
28914+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28915 else {
28916- kvm_x86_ops->hwapic_irr_update = NULL;
28917- kvm_x86_ops->deliver_posted_interrupt = NULL;
28918- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28919+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28920+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28921+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28922 }
28923+ pax_close_kernel();
28924
28925 if (nested)
28926 nested_vmx_setup_ctls_msrs();
28927@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28928 "jmp 2f \n\t"
28929 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28930 "2: "
28931+
28932+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28933+ "ljmp %[cs],$3f\n\t"
28934+ "3: "
28935+#endif
28936+
28937 /* Save guest registers, load host registers, keep flags */
28938 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28939 "pop %0 \n\t"
28940@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28941 #endif
28942 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28943 [wordsize]"i"(sizeof(ulong))
28944+
28945+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28946+ ,[cs]"i"(__KERNEL_CS)
28947+#endif
28948+
28949 : "cc", "memory"
28950 #ifdef CONFIG_X86_64
28951 , "rax", "rbx", "rdi", "rsi"
28952@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28953 if (debugctlmsr)
28954 update_debugctlmsr(debugctlmsr);
28955
28956-#ifndef CONFIG_X86_64
28957+#ifdef CONFIG_X86_32
28958 /*
28959 * The sysexit path does not restore ds/es, so we must set them to
28960 * a reasonable value ourselves.
28961@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28962 * may be executed in interrupt context, which saves and restore segments
28963 * around it, nullifying its effect.
28964 */
28965- loadsegment(ds, __USER_DS);
28966- loadsegment(es, __USER_DS);
28967+ loadsegment(ds, __KERNEL_DS);
28968+ loadsegment(es, __KERNEL_DS);
28969+ loadsegment(ss, __KERNEL_DS);
28970+
28971+#ifdef CONFIG_PAX_KERNEXEC
28972+ loadsegment(fs, __KERNEL_PERCPU);
28973+#endif
28974+
28975+#ifdef CONFIG_PAX_MEMORY_UDEREF
28976+ __set_fs(current_thread_info()->addr_limit);
28977+#endif
28978+
28979 #endif
28980
28981 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28982diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28983index 64d76c1..e20a4c1 100644
28984--- a/arch/x86/kvm/x86.c
28985+++ b/arch/x86/kvm/x86.c
28986@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28987 {
28988 struct kvm *kvm = vcpu->kvm;
28989 int lm = is_long_mode(vcpu);
28990- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28991- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28992+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28993+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28994 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28995 : kvm->arch.xen_hvm_config.blob_size_32;
28996 u32 page_num = data & ~PAGE_MASK;
28997@@ -2809,6 +2809,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28998 if (n < msr_list.nmsrs)
28999 goto out;
29000 r = -EFAULT;
29001+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29002+ goto out;
29003 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29004 num_msrs_to_save * sizeof(u32)))
29005 goto out;
29006@@ -5745,7 +5747,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29007 };
29008 #endif
29009
29010-int kvm_arch_init(void *opaque)
29011+int kvm_arch_init(const void *opaque)
29012 {
29013 int r;
29014 struct kvm_x86_ops *ops = opaque;
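The msr-list hunk adds a consistency check before the bulk copy-out: if num_msrs_to_save ever exceeded the capacity of the static msrs_to_save[] table, the copy_to_user() would leak adjacent kernel memory, so the patch fails the ioctl instead. Sketch of the guarded copy (num_msrs_to_save and msrs_to_save are the file's own statics):

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    /* sketch: never copy out more than the backing table actually holds */
    static long copy_msr_indices(u32 __user *dst, unsigned int n_requested)
    {
            if (n_requested < num_msrs_to_save)
                    return -E2BIG;
            if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
                    return -EFAULT; /* counter and table disagree: refuse */
            if (copy_to_user(dst, msrs_to_save,
                             num_msrs_to_save * sizeof(u32)))
                    return -EFAULT;
            return 0;
    }

The same file's hunks also constify kvm_arch_init()'s opaque argument and give the Xen HVM blob pointers proper __user annotations.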
29015diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29016index c1c1544..f90c9d5 100644
29017--- a/arch/x86/lguest/boot.c
29018+++ b/arch/x86/lguest/boot.c
29019@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29020 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29021 * Launcher to reboot us.
29022 */
29023-static void lguest_restart(char *reason)
29024+static __noreturn void lguest_restart(char *reason)
29025 {
29026 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29027+ BUG();
29028 }
29029
29030 /*G:050
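lguest_restart() issues a shutdown hypercall and cannot return, so the patch annotates it __noreturn and backs the promise with BUG() in case the hypercall ever falls through. A self-contained illustration of the attribute:

    #include <stdio.h>
    #include <stdlib.h>

    /* a function the compiler may assume never returns; back the promise
     * with abort() -- the kernel analogue is BUG() */
    static void fatal(const char *why) __attribute__((noreturn));

    static void fatal(const char *why)
    {
            fprintf(stderr, "fatal: %s\n", why);
            abort();
    }

    int main(void)
    {
            fatal("demo");
            /* unreachable: no return needed, and the compiler knows it */
    }

Without the annotation the compiler must assume the function can return, which both pessimizes callers and hides the logic error if the hypercall ever failed silently.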
29031diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29032index 00933d5..3a64af9 100644
29033--- a/arch/x86/lib/atomic64_386_32.S
29034+++ b/arch/x86/lib/atomic64_386_32.S
29035@@ -48,6 +48,10 @@ BEGIN(read)
29036 movl (v), %eax
29037 movl 4(v), %edx
29038 RET_ENDP
29039+BEGIN(read_unchecked)
29040+ movl (v), %eax
29041+ movl 4(v), %edx
29042+RET_ENDP
29043 #undef v
29044
29045 #define v %esi
29046@@ -55,6 +59,10 @@ BEGIN(set)
29047 movl %ebx, (v)
29048 movl %ecx, 4(v)
29049 RET_ENDP
29050+BEGIN(set_unchecked)
29051+ movl %ebx, (v)
29052+ movl %ecx, 4(v)
29053+RET_ENDP
29054 #undef v
29055
29056 #define v %esi
29057@@ -70,6 +78,20 @@ RET_ENDP
29058 BEGIN(add)
29059 addl %eax, (v)
29060 adcl %edx, 4(v)
29061+
29062+#ifdef CONFIG_PAX_REFCOUNT
29063+ jno 0f
29064+ subl %eax, (v)
29065+ sbbl %edx, 4(v)
29066+ int $4
29067+0:
29068+ _ASM_EXTABLE(0b, 0b)
29069+#endif
29070+
29071+RET_ENDP
29072+BEGIN(add_unchecked)
29073+ addl %eax, (v)
29074+ adcl %edx, 4(v)
29075 RET_ENDP
29076 #undef v
29077
29078@@ -77,6 +99,24 @@ RET_ENDP
29079 BEGIN(add_return)
29080 addl (v), %eax
29081 adcl 4(v), %edx
29082+
29083+#ifdef CONFIG_PAX_REFCOUNT
29084+ into
29085+1234:
29086+ _ASM_EXTABLE(1234b, 2f)
29087+#endif
29088+
29089+ movl %eax, (v)
29090+ movl %edx, 4(v)
29091+
29092+#ifdef CONFIG_PAX_REFCOUNT
29093+2:
29094+#endif
29095+
29096+RET_ENDP
29097+BEGIN(add_return_unchecked)
29098+ addl (v), %eax
29099+ adcl 4(v), %edx
29100 movl %eax, (v)
29101 movl %edx, 4(v)
29102 RET_ENDP
29103@@ -86,6 +126,20 @@ RET_ENDP
29104 BEGIN(sub)
29105 subl %eax, (v)
29106 sbbl %edx, 4(v)
29107+
29108+#ifdef CONFIG_PAX_REFCOUNT
29109+ jno 0f
29110+ addl %eax, (v)
29111+ adcl %edx, 4(v)
29112+ int $4
29113+0:
29114+ _ASM_EXTABLE(0b, 0b)
29115+#endif
29116+
29117+RET_ENDP
29118+BEGIN(sub_unchecked)
29119+ subl %eax, (v)
29120+ sbbl %edx, 4(v)
29121 RET_ENDP
29122 #undef v
29123
29124@@ -96,6 +150,27 @@ BEGIN(sub_return)
29125 sbbl $0, %edx
29126 addl (v), %eax
29127 adcl 4(v), %edx
29128+
29129+#ifdef CONFIG_PAX_REFCOUNT
29130+ into
29131+1234:
29132+ _ASM_EXTABLE(1234b, 2f)
29133+#endif
29134+
29135+ movl %eax, (v)
29136+ movl %edx, 4(v)
29137+
29138+#ifdef CONFIG_PAX_REFCOUNT
29139+2:
29140+#endif
29141+
29142+RET_ENDP
29143+BEGIN(sub_return_unchecked)
29144+ negl %edx
29145+ negl %eax
29146+ sbbl $0, %edx
29147+ addl (v), %eax
29148+ adcl 4(v), %edx
29149 movl %eax, (v)
29150 movl %edx, 4(v)
29151 RET_ENDP
29152@@ -105,6 +180,20 @@ RET_ENDP
29153 BEGIN(inc)
29154 addl $1, (v)
29155 adcl $0, 4(v)
29156+
29157+#ifdef CONFIG_PAX_REFCOUNT
29158+ jno 0f
29159+ subl $1, (v)
29160+ sbbl $0, 4(v)
29161+ int $4
29162+0:
29163+ _ASM_EXTABLE(0b, 0b)
29164+#endif
29165+
29166+RET_ENDP
29167+BEGIN(inc_unchecked)
29168+ addl $1, (v)
29169+ adcl $0, 4(v)
29170 RET_ENDP
29171 #undef v
29172
29173@@ -114,6 +203,26 @@ BEGIN(inc_return)
29174 movl 4(v), %edx
29175 addl $1, %eax
29176 adcl $0, %edx
29177+
29178+#ifdef CONFIG_PAX_REFCOUNT
29179+ into
29180+1234:
29181+ _ASM_EXTABLE(1234b, 2f)
29182+#endif
29183+
29184+ movl %eax, (v)
29185+ movl %edx, 4(v)
29186+
29187+#ifdef CONFIG_PAX_REFCOUNT
29188+2:
29189+#endif
29190+
29191+RET_ENDP
29192+BEGIN(inc_return_unchecked)
29193+ movl (v), %eax
29194+ movl 4(v), %edx
29195+ addl $1, %eax
29196+ adcl $0, %edx
29197 movl %eax, (v)
29198 movl %edx, 4(v)
29199 RET_ENDP
29200@@ -123,6 +232,20 @@ RET_ENDP
29201 BEGIN(dec)
29202 subl $1, (v)
29203 sbbl $0, 4(v)
29204+
29205+#ifdef CONFIG_PAX_REFCOUNT
29206+ jno 0f
29207+ addl $1, (v)
29208+ adcl $0, 4(v)
29209+ int $4
29210+0:
29211+ _ASM_EXTABLE(0b, 0b)
29212+#endif
29213+
29214+RET_ENDP
29215+BEGIN(dec_unchecked)
29216+ subl $1, (v)
29217+ sbbl $0, 4(v)
29218 RET_ENDP
29219 #undef v
29220
29221@@ -132,6 +255,26 @@ BEGIN(dec_return)
29222 movl 4(v), %edx
29223 subl $1, %eax
29224 sbbl $0, %edx
29225+
29226+#ifdef CONFIG_PAX_REFCOUNT
29227+ into
29228+1234:
29229+ _ASM_EXTABLE(1234b, 2f)
29230+#endif
29231+
29232+ movl %eax, (v)
29233+ movl %edx, 4(v)
29234+
29235+#ifdef CONFIG_PAX_REFCOUNT
29236+2:
29237+#endif
29238+
29239+RET_ENDP
29240+BEGIN(dec_return_unchecked)
29241+ movl (v), %eax
29242+ movl 4(v), %edx
29243+ subl $1, %eax
29244+ sbbl $0, %edx
29245 movl %eax, (v)
29246 movl %edx, 4(v)
29247 RET_ENDP
29248@@ -143,6 +286,13 @@ BEGIN(add_unless)
29249 adcl %edx, %edi
29250 addl (v), %eax
29251 adcl 4(v), %edx
29252+
29253+#ifdef CONFIG_PAX_REFCOUNT
29254+ into
29255+1234:
29256+ _ASM_EXTABLE(1234b, 2f)
29257+#endif
29258+
29259 cmpl %eax, %ecx
29260 je 3f
29261 1:
29262@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29263 1:
29264 addl $1, %eax
29265 adcl $0, %edx
29266+
29267+#ifdef CONFIG_PAX_REFCOUNT
29268+ into
29269+1234:
29270+ _ASM_EXTABLE(1234b, 2f)
29271+#endif
29272+
29273 movl %eax, (v)
29274 movl %edx, 4(v)
29275 movl $1, %eax
29276@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29277 movl 4(v), %edx
29278 subl $1, %eax
29279 sbbl $0, %edx
29280+
29281+#ifdef CONFIG_PAX_REFCOUNT
29282+ into
29283+1234:
29284+ _ASM_EXTABLE(1234b, 1f)
29285+#endif
29286+
29287 js 1f
29288 movl %eax, (v)
29289 movl %edx, 4(v)
29290diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29291index f5cc9eb..51fa319 100644
29292--- a/arch/x86/lib/atomic64_cx8_32.S
29293+++ b/arch/x86/lib/atomic64_cx8_32.S
29294@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29295 CFI_STARTPROC
29296
29297 read64 %ecx
29298+ pax_force_retaddr
29299 ret
29300 CFI_ENDPROC
29301 ENDPROC(atomic64_read_cx8)
29302
29303+ENTRY(atomic64_read_unchecked_cx8)
29304+ CFI_STARTPROC
29305+
29306+ read64 %ecx
29307+ pax_force_retaddr
29308+ ret
29309+ CFI_ENDPROC
29310+ENDPROC(atomic64_read_unchecked_cx8)
29311+
29312 ENTRY(atomic64_set_cx8)
29313 CFI_STARTPROC
29314
29315@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29316 cmpxchg8b (%esi)
29317 jne 1b
29318
29319+ pax_force_retaddr
29320 ret
29321 CFI_ENDPROC
29322 ENDPROC(atomic64_set_cx8)
29323
29324+ENTRY(atomic64_set_unchecked_cx8)
29325+ CFI_STARTPROC
29326+
29327+1:
29328+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29329+ * are atomic on 586 and newer */
29330+ cmpxchg8b (%esi)
29331+ jne 1b
29332+
29333+ pax_force_retaddr
29334+ ret
29335+ CFI_ENDPROC
29336+ENDPROC(atomic64_set_unchecked_cx8)
29337+
29338 ENTRY(atomic64_xchg_cx8)
29339 CFI_STARTPROC
29340
29341@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29342 cmpxchg8b (%esi)
29343 jne 1b
29344
29345+ pax_force_retaddr
29346 ret
29347 CFI_ENDPROC
29348 ENDPROC(atomic64_xchg_cx8)
29349
29350-.macro addsub_return func ins insc
29351-ENTRY(atomic64_\func\()_return_cx8)
29352+.macro addsub_return func ins insc unchecked=""
29353+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29354 CFI_STARTPROC
29355 SAVE ebp
29356 SAVE ebx
29357@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29358 movl %edx, %ecx
29359 \ins\()l %esi, %ebx
29360 \insc\()l %edi, %ecx
29361+
29362+.ifb \unchecked
29363+#ifdef CONFIG_PAX_REFCOUNT
29364+ into
29365+2:
29366+ _ASM_EXTABLE(2b, 3f)
29367+#endif
29368+.endif
29369+
29370 LOCK_PREFIX
29371 cmpxchg8b (%ebp)
29372 jne 1b
29373-
29374-10:
29375 movl %ebx, %eax
29376 movl %ecx, %edx
29377+
29378+.ifb \unchecked
29379+#ifdef CONFIG_PAX_REFCOUNT
29380+3:
29381+#endif
29382+.endif
29383+
29384 RESTORE edi
29385 RESTORE esi
29386 RESTORE ebx
29387 RESTORE ebp
29388+ pax_force_retaddr
29389 ret
29390 CFI_ENDPROC
29391-ENDPROC(atomic64_\func\()_return_cx8)
29392+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29393 .endm
29394
29395 addsub_return add add adc
29396 addsub_return sub sub sbb
29397+addsub_return add add adc _unchecked
29398+addsub_return sub sub sbb _unchecked
29399
29400-.macro incdec_return func ins insc
29401-ENTRY(atomic64_\func\()_return_cx8)
29402+.macro incdec_return func ins insc unchecked=""
29403+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29404 CFI_STARTPROC
29405 SAVE ebx
29406
29407@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29408 movl %edx, %ecx
29409 \ins\()l $1, %ebx
29410 \insc\()l $0, %ecx
29411+
29412+.ifb \unchecked
29413+#ifdef CONFIG_PAX_REFCOUNT
29414+ into
29415+2:
29416+ _ASM_EXTABLE(2b, 3f)
29417+#endif
29418+.endif
29419+
29420 LOCK_PREFIX
29421 cmpxchg8b (%esi)
29422 jne 1b
29423
29424-10:
29425 movl %ebx, %eax
29426 movl %ecx, %edx
29427+
29428+.ifb \unchecked
29429+#ifdef CONFIG_PAX_REFCOUNT
29430+3:
29431+#endif
29432+.endif
29433+
29434 RESTORE ebx
29435+ pax_force_retaddr
29436 ret
29437 CFI_ENDPROC
29438-ENDPROC(atomic64_\func\()_return_cx8)
29439+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29440 .endm
29441
29442 incdec_return inc add adc
29443 incdec_return dec sub sbb
29444+incdec_return inc add adc _unchecked
29445+incdec_return dec sub sbb _unchecked
29446
29447 ENTRY(atomic64_dec_if_positive_cx8)
29448 CFI_STARTPROC
29449@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29450 movl %edx, %ecx
29451 subl $1, %ebx
29452 sbb $0, %ecx
29453+
29454+#ifdef CONFIG_PAX_REFCOUNT
29455+ into
29456+1234:
29457+ _ASM_EXTABLE(1234b, 2f)
29458+#endif
29459+
29460 js 2f
29461 LOCK_PREFIX
29462 cmpxchg8b (%esi)
29463@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29464 movl %ebx, %eax
29465 movl %ecx, %edx
29466 RESTORE ebx
29467+ pax_force_retaddr
29468 ret
29469 CFI_ENDPROC
29470 ENDPROC(atomic64_dec_if_positive_cx8)
29471@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29472 movl %edx, %ecx
29473 addl %ebp, %ebx
29474 adcl %edi, %ecx
29475+
29476+#ifdef CONFIG_PAX_REFCOUNT
29477+ into
29478+1234:
29479+ _ASM_EXTABLE(1234b, 3f)
29480+#endif
29481+
29482 LOCK_PREFIX
29483 cmpxchg8b (%esi)
29484 jne 1b
29485@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29486 CFI_ADJUST_CFA_OFFSET -8
29487 RESTORE ebx
29488 RESTORE ebp
29489+ pax_force_retaddr
29490 ret
29491 4:
29492 cmpl %edx, 4(%esp)
29493@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29494 xorl %ecx, %ecx
29495 addl $1, %ebx
29496 adcl %edx, %ecx
29497+
29498+#ifdef CONFIG_PAX_REFCOUNT
29499+ into
29500+1234:
29501+ _ASM_EXTABLE(1234b, 3f)
29502+#endif
29503+
29504 LOCK_PREFIX
29505 cmpxchg8b (%esi)
29506 jne 1b
29507@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29508 movl $1, %eax
29509 3:
29510 RESTORE ebx
29511+ pax_force_retaddr
29512 ret
29513 CFI_ENDPROC
29514 ENDPROC(atomic64_inc_not_zero_cx8)
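The two atomic64 implementations above are the PAX_REFCOUNT backend for 32-bit x86: each checked add/sub/inc/dec tests the signed-overflow flag right after the arithmetic (`into`, or `jno 0f` plus `int $4`, both wired into the exception table), undoes the update where memory was already written, and a parallel family of *_unchecked entry points preserves wrap-around semantics for the few counters that legitimately overflow. Every return path also gains pax_force_retaddr, the KERNEXEC return-address fixup. The C analogue of a checked increment, with a compiler builtin (gcc 5+/clang) standing in for the hardware overflow flag:

    #include <stdint.h>
    #include <stdlib.h>

    /* sketch: a reference-count increment that traps on signed overflow
     * instead of silently wrapping negative */
    static void refcount64_inc_checked(int64_t *v)
    {
            int64_t newval;

            if (__builtin_add_overflow(*v, 1, &newval))
                    abort();        /* kernel path: #OF -> kill the task */
            *v = newval;
    }

A wrapped refcount is the standard setup for a use-after-free; trapping converts that silent corruption into an immediate, attributable fault.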
29515diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29516index e78b8eee..7e173a8 100644
29517--- a/arch/x86/lib/checksum_32.S
29518+++ b/arch/x86/lib/checksum_32.S
29519@@ -29,7 +29,8 @@
29520 #include <asm/dwarf2.h>
29521 #include <asm/errno.h>
29522 #include <asm/asm.h>
29523-
29524+#include <asm/segment.h>
29525+
29526 /*
29527 * computes a partial checksum, e.g. for TCP/UDP fragments
29528 */
29529@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29530
29531 #define ARGBASE 16
29532 #define FP 12
29533-
29534-ENTRY(csum_partial_copy_generic)
29535+
29536+ENTRY(csum_partial_copy_generic_to_user)
29537 CFI_STARTPROC
29538+
29539+#ifdef CONFIG_PAX_MEMORY_UDEREF
29540+ pushl_cfi %gs
29541+ popl_cfi %es
29542+ jmp csum_partial_copy_generic
29543+#endif
29544+
29545+ENTRY(csum_partial_copy_generic_from_user)
29546+
29547+#ifdef CONFIG_PAX_MEMORY_UDEREF
29548+ pushl_cfi %gs
29549+ popl_cfi %ds
29550+#endif
29551+
29552+ENTRY(csum_partial_copy_generic)
29553 subl $4,%esp
29554 CFI_ADJUST_CFA_OFFSET 4
29555 pushl_cfi %edi
29556@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29557 jmp 4f
29558 SRC(1: movw (%esi), %bx )
29559 addl $2, %esi
29560-DST( movw %bx, (%edi) )
29561+DST( movw %bx, %es:(%edi) )
29562 addl $2, %edi
29563 addw %bx, %ax
29564 adcl $0, %eax
29565@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29566 SRC(1: movl (%esi), %ebx )
29567 SRC( movl 4(%esi), %edx )
29568 adcl %ebx, %eax
29569-DST( movl %ebx, (%edi) )
29570+DST( movl %ebx, %es:(%edi) )
29571 adcl %edx, %eax
29572-DST( movl %edx, 4(%edi) )
29573+DST( movl %edx, %es:4(%edi) )
29574
29575 SRC( movl 8(%esi), %ebx )
29576 SRC( movl 12(%esi), %edx )
29577 adcl %ebx, %eax
29578-DST( movl %ebx, 8(%edi) )
29579+DST( movl %ebx, %es:8(%edi) )
29580 adcl %edx, %eax
29581-DST( movl %edx, 12(%edi) )
29582+DST( movl %edx, %es:12(%edi) )
29583
29584 SRC( movl 16(%esi), %ebx )
29585 SRC( movl 20(%esi), %edx )
29586 adcl %ebx, %eax
29587-DST( movl %ebx, 16(%edi) )
29588+DST( movl %ebx, %es:16(%edi) )
29589 adcl %edx, %eax
29590-DST( movl %edx, 20(%edi) )
29591+DST( movl %edx, %es:20(%edi) )
29592
29593 SRC( movl 24(%esi), %ebx )
29594 SRC( movl 28(%esi), %edx )
29595 adcl %ebx, %eax
29596-DST( movl %ebx, 24(%edi) )
29597+DST( movl %ebx, %es:24(%edi) )
29598 adcl %edx, %eax
29599-DST( movl %edx, 28(%edi) )
29600+DST( movl %edx, %es:28(%edi) )
29601
29602 lea 32(%esi), %esi
29603 lea 32(%edi), %edi
29604@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29605 shrl $2, %edx # This clears CF
29606 SRC(3: movl (%esi), %ebx )
29607 adcl %ebx, %eax
29608-DST( movl %ebx, (%edi) )
29609+DST( movl %ebx, %es:(%edi) )
29610 lea 4(%esi), %esi
29611 lea 4(%edi), %edi
29612 dec %edx
29613@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29614 jb 5f
29615 SRC( movw (%esi), %cx )
29616 leal 2(%esi), %esi
29617-DST( movw %cx, (%edi) )
29618+DST( movw %cx, %es:(%edi) )
29619 leal 2(%edi), %edi
29620 je 6f
29621 shll $16,%ecx
29622 SRC(5: movb (%esi), %cl )
29623-DST( movb %cl, (%edi) )
29624+DST( movb %cl, %es:(%edi) )
29625 6: addl %ecx, %eax
29626 adcl $0, %eax
29627 7:
29628@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29629
29630 6001:
29631 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29632- movl $-EFAULT, (%ebx)
29633+ movl $-EFAULT, %ss:(%ebx)
29634
29635 # zero the complete destination - computing the rest
29636 # is too much work
29637@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29638
29639 6002:
29640 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29641- movl $-EFAULT,(%ebx)
29642+ movl $-EFAULT,%ss:(%ebx)
29643 jmp 5000b
29644
29645 .previous
29646
29647+ pushl_cfi %ss
29648+ popl_cfi %ds
29649+ pushl_cfi %ss
29650+ popl_cfi %es
29651 popl_cfi %ebx
29652 CFI_RESTORE ebx
29653 popl_cfi %esi
29654@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29655 popl_cfi %ecx # equivalent to addl $4,%esp
29656 ret
29657 CFI_ENDPROC
29658-ENDPROC(csum_partial_copy_generic)
29659+ENDPROC(csum_partial_copy_generic_to_user)
29660
29661 #else
29662
29663 /* Version for PentiumII/PPro */
29664
29665 #define ROUND1(x) \
29666+ nop; nop; nop; \
29667 SRC(movl x(%esi), %ebx ) ; \
29668 addl %ebx, %eax ; \
29669- DST(movl %ebx, x(%edi) ) ;
29670+ DST(movl %ebx, %es:x(%edi)) ;
29671
29672 #define ROUND(x) \
29673+ nop; nop; nop; \
29674 SRC(movl x(%esi), %ebx ) ; \
29675 adcl %ebx, %eax ; \
29676- DST(movl %ebx, x(%edi) ) ;
29677+ DST(movl %ebx, %es:x(%edi)) ;
29678
29679 #define ARGBASE 12
29680-
29681-ENTRY(csum_partial_copy_generic)
29682+
29683+ENTRY(csum_partial_copy_generic_to_user)
29684 CFI_STARTPROC
29685+
29686+#ifdef CONFIG_PAX_MEMORY_UDEREF
29687+ pushl_cfi %gs
29688+ popl_cfi %es
29689+ jmp csum_partial_copy_generic
29690+#endif
29691+
29692+ENTRY(csum_partial_copy_generic_from_user)
29693+
29694+#ifdef CONFIG_PAX_MEMORY_UDEREF
29695+ pushl_cfi %gs
29696+ popl_cfi %ds
29697+#endif
29698+
29699+ENTRY(csum_partial_copy_generic)
29700 pushl_cfi %ebx
29701 CFI_REL_OFFSET ebx, 0
29702 pushl_cfi %edi
29703@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29704 subl %ebx, %edi
29705 lea -1(%esi),%edx
29706 andl $-32,%edx
29707- lea 3f(%ebx,%ebx), %ebx
29708+ lea 3f(%ebx,%ebx,2), %ebx
29709 testl %esi, %esi
29710 jmp *%ebx
29711 1: addl $64,%esi
29712@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29713 jb 5f
29714 SRC( movw (%esi), %dx )
29715 leal 2(%esi), %esi
29716-DST( movw %dx, (%edi) )
29717+DST( movw %dx, %es:(%edi) )
29718 leal 2(%edi), %edi
29719 je 6f
29720 shll $16,%edx
29721 5:
29722 SRC( movb (%esi), %dl )
29723-DST( movb %dl, (%edi) )
29724+DST( movb %dl, %es:(%edi) )
29725 6: addl %edx, %eax
29726 adcl $0, %eax
29727 7:
29728 .section .fixup, "ax"
29729 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29730- movl $-EFAULT, (%ebx)
29731+ movl $-EFAULT, %ss:(%ebx)
29732 # zero the complete destination (computing the rest is too much work)
29733 movl ARGBASE+8(%esp),%edi # dst
29734 movl ARGBASE+12(%esp),%ecx # len
29735@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29736 rep; stosb
29737 jmp 7b
29738 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29739- movl $-EFAULT, (%ebx)
29740+ movl $-EFAULT, %ss:(%ebx)
29741 jmp 7b
29742 .previous
29743
29744+#ifdef CONFIG_PAX_MEMORY_UDEREF
29745+ pushl_cfi %ss
29746+ popl_cfi %ds
29747+ pushl_cfi %ss
29748+ popl_cfi %es
29749+#endif
29750+
29751 popl_cfi %esi
29752 CFI_RESTORE esi
29753 popl_cfi %edi
29754@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29755 CFI_RESTORE ebx
29756 ret
29757 CFI_ENDPROC
29758-ENDPROC(csum_partial_copy_generic)
29759+ENDPROC(csum_partial_copy_generic_to_user)
29760
29761 #undef ROUND
29762 #undef ROUND1
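Under PAX_MEMORY_UDEREF the 32-bit checksum-copy grows direction-specific entry points: csum_partial_copy_generic_to_user reloads %es from %gs (the userland segment) so only the destination writes cross into userland, the _from_user variant does the same for %ds, and the fixup/exit paths restore flat %ds/%es from %ss. The added `nop; nop; nop;` padding in the ROUND macros and the widened stride in `lea 3f(%ebx,%ebx,2), %ebx` apparently keep the computed jump into the unrolled loop consistent once the %es: prefixes lengthen each iteration. A C-level sketch of the dispatch shape only (the segment reloads exist only in the asm; the enter_* helpers here are hypothetical):

    /* sketch: one shared worker, two thin entry points that decide which
     * side of the copy lives in the userland segment */
    unsigned int csum_copy_worker(const char *src, char *dst,
                                  int len, unsigned int sum);

    unsigned int csum_copy_to_user(const char *src, char __user *dst,
                                   int len, unsigned int sum)
    {
            enter_userland_dst();   /* hypothetical: %es <- user segment */
            return csum_copy_worker(src, (char __force *)dst, len, sum);
    }

    unsigned int csum_copy_from_user(const char __user *src, char *dst,
                                     int len, unsigned int sum)
    {
            enter_userland_src();   /* hypothetical: %ds <- user segment */
            return csum_copy_worker((const char __force *)src, dst, len, sum);
    }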
29763diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29764index f2145cf..cea889d 100644
29765--- a/arch/x86/lib/clear_page_64.S
29766+++ b/arch/x86/lib/clear_page_64.S
29767@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29768 movl $4096/8,%ecx
29769 xorl %eax,%eax
29770 rep stosq
29771+ pax_force_retaddr
29772 ret
29773 CFI_ENDPROC
29774 ENDPROC(clear_page_c)
29775@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29776 movl $4096,%ecx
29777 xorl %eax,%eax
29778 rep stosb
29779+ pax_force_retaddr
29780 ret
29781 CFI_ENDPROC
29782 ENDPROC(clear_page_c_e)
29783@@ -43,6 +45,7 @@ ENTRY(clear_page)
29784 leaq 64(%rdi),%rdi
29785 jnz .Lloop
29786 nop
29787+ pax_force_retaddr
29788 ret
29789 CFI_ENDPROC
29790 .Lclear_page_end:
29791@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29792
29793 #include <asm/cpufeature.h>
29794
29795- .section .altinstr_replacement,"ax"
29796+ .section .altinstr_replacement,"a"
29797 1: .byte 0xeb /* jmp <disp8> */
29798 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29799 2: .byte 0xeb /* jmp <disp8> */
29800diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29801index 40a1725..5d12ac4 100644
29802--- a/arch/x86/lib/cmpxchg16b_emu.S
29803+++ b/arch/x86/lib/cmpxchg16b_emu.S
29804@@ -8,6 +8,7 @@
29805 #include <linux/linkage.h>
29806 #include <asm/dwarf2.h>
29807 #include <asm/percpu.h>
29808+#include <asm/alternative-asm.h>
29809
29810 .text
29811
29812@@ -46,12 +47,14 @@ CFI_STARTPROC
29813 CFI_REMEMBER_STATE
29814 popfq_cfi
29815 mov $1, %al
29816+ pax_force_retaddr
29817 ret
29818
29819 CFI_RESTORE_STATE
29820 .Lnot_same:
29821 popfq_cfi
29822 xor %al,%al
29823+ pax_force_retaddr
29824 ret
29825
29826 CFI_ENDPROC
29827diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29828index 176cca6..e0d658e 100644
29829--- a/arch/x86/lib/copy_page_64.S
29830+++ b/arch/x86/lib/copy_page_64.S
29831@@ -9,6 +9,7 @@ copy_page_rep:
29832 CFI_STARTPROC
29833 movl $4096/8, %ecx
29834 rep movsq
29835+ pax_force_retaddr
29836 ret
29837 CFI_ENDPROC
29838 ENDPROC(copy_page_rep)
29839@@ -24,8 +25,8 @@ ENTRY(copy_page)
29840 CFI_ADJUST_CFA_OFFSET 2*8
29841 movq %rbx, (%rsp)
29842 CFI_REL_OFFSET rbx, 0
29843- movq %r12, 1*8(%rsp)
29844- CFI_REL_OFFSET r12, 1*8
29845+ movq %r13, 1*8(%rsp)
29846+ CFI_REL_OFFSET r13, 1*8
29847
29848 movl $(4096/64)-5, %ecx
29849 .p2align 4
29850@@ -38,7 +39,7 @@ ENTRY(copy_page)
29851 movq 0x8*4(%rsi), %r9
29852 movq 0x8*5(%rsi), %r10
29853 movq 0x8*6(%rsi), %r11
29854- movq 0x8*7(%rsi), %r12
29855+ movq 0x8*7(%rsi), %r13
29856
29857 prefetcht0 5*64(%rsi)
29858
29859@@ -49,7 +50,7 @@ ENTRY(copy_page)
29860 movq %r9, 0x8*4(%rdi)
29861 movq %r10, 0x8*5(%rdi)
29862 movq %r11, 0x8*6(%rdi)
29863- movq %r12, 0x8*7(%rdi)
29864+ movq %r13, 0x8*7(%rdi)
29865
29866 leaq 64 (%rsi), %rsi
29867 leaq 64 (%rdi), %rdi
29868@@ -68,7 +69,7 @@ ENTRY(copy_page)
29869 movq 0x8*4(%rsi), %r9
29870 movq 0x8*5(%rsi), %r10
29871 movq 0x8*6(%rsi), %r11
29872- movq 0x8*7(%rsi), %r12
29873+ movq 0x8*7(%rsi), %r13
29874
29875 movq %rax, 0x8*0(%rdi)
29876 movq %rbx, 0x8*1(%rdi)
29877@@ -77,7 +78,7 @@ ENTRY(copy_page)
29878 movq %r9, 0x8*4(%rdi)
29879 movq %r10, 0x8*5(%rdi)
29880 movq %r11, 0x8*6(%rdi)
29881- movq %r12, 0x8*7(%rdi)
29882+ movq %r13, 0x8*7(%rdi)
29883
29884 leaq 64(%rdi), %rdi
29885 leaq 64(%rsi), %rsi
29886@@ -85,10 +86,11 @@ ENTRY(copy_page)
29887
29888 movq (%rsp), %rbx
29889 CFI_RESTORE rbx
29890- movq 1*8(%rsp), %r12
29891- CFI_RESTORE r12
29892+ movq 1*8(%rsp), %r13
29893+ CFI_RESTORE r13
29894 addq $2*8, %rsp
29895 CFI_ADJUST_CFA_OFFSET -2*8
29896+ pax_force_retaddr
29897 ret
29898 .Lcopy_page_end:
29899 CFI_ENDPROC
29900@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29901
29902 #include <asm/cpufeature.h>
29903
29904- .section .altinstr_replacement,"ax"
29905+ .section .altinstr_replacement,"a"
29906 1: .byte 0xeb /* jmp <disp8> */
29907 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29908 2:
29909diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29910index dee945d..a84067b 100644
29911--- a/arch/x86/lib/copy_user_64.S
29912+++ b/arch/x86/lib/copy_user_64.S
29913@@ -18,31 +18,7 @@
29914 #include <asm/alternative-asm.h>
29915 #include <asm/asm.h>
29916 #include <asm/smap.h>
29917-
29918-/*
29919- * By placing feature2 after feature1 in altinstructions section, we logically
29920- * implement:
29921- * If CPU has feature2, jmp to alt2 is used
29922- * else if CPU has feature1, jmp to alt1 is used
29923- * else jmp to orig is used.
29924- */
29925- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29926-0:
29927- .byte 0xe9 /* 32bit jump */
29928- .long \orig-1f /* by default jump to orig */
29929-1:
29930- .section .altinstr_replacement,"ax"
29931-2: .byte 0xe9 /* near jump with 32bit immediate */
29932- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29933-3: .byte 0xe9 /* near jump with 32bit immediate */
29934- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29935- .previous
29936-
29937- .section .altinstructions,"a"
29938- altinstruction_entry 0b,2b,\feature1,5,5
29939- altinstruction_entry 0b,3b,\feature2,5,5
29940- .previous
29941- .endm
29942+#include <asm/pgtable.h>
29943
29944 .macro ALIGN_DESTINATION
29945 #ifdef FIX_ALIGNMENT
29946@@ -70,52 +46,6 @@
29947 #endif
29948 .endm
29949
29950-/* Standard copy_to_user with segment limit checking */
29951-ENTRY(_copy_to_user)
29952- CFI_STARTPROC
29953- GET_THREAD_INFO(%rax)
29954- movq %rdi,%rcx
29955- addq %rdx,%rcx
29956- jc bad_to_user
29957- cmpq TI_addr_limit(%rax),%rcx
29958- ja bad_to_user
29959- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29960- copy_user_generic_unrolled,copy_user_generic_string, \
29961- copy_user_enhanced_fast_string
29962- CFI_ENDPROC
29963-ENDPROC(_copy_to_user)
29964-
29965-/* Standard copy_from_user with segment limit checking */
29966-ENTRY(_copy_from_user)
29967- CFI_STARTPROC
29968- GET_THREAD_INFO(%rax)
29969- movq %rsi,%rcx
29970- addq %rdx,%rcx
29971- jc bad_from_user
29972- cmpq TI_addr_limit(%rax),%rcx
29973- ja bad_from_user
29974- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29975- copy_user_generic_unrolled,copy_user_generic_string, \
29976- copy_user_enhanced_fast_string
29977- CFI_ENDPROC
29978-ENDPROC(_copy_from_user)
29979-
29980- .section .fixup,"ax"
29981- /* must zero dest */
29982-ENTRY(bad_from_user)
29983-bad_from_user:
29984- CFI_STARTPROC
29985- movl %edx,%ecx
29986- xorl %eax,%eax
29987- rep
29988- stosb
29989-bad_to_user:
29990- movl %edx,%eax
29991- ret
29992- CFI_ENDPROC
29993-ENDPROC(bad_from_user)
29994- .previous
29995-
29996 /*
29997 * copy_user_generic_unrolled - memory copy with exception handling.
29998 * This version is for CPUs like P4 that don't have efficient micro
29999@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30000 */
30001 ENTRY(copy_user_generic_unrolled)
30002 CFI_STARTPROC
30003+ ASM_PAX_OPEN_USERLAND
30004 ASM_STAC
30005 cmpl $8,%edx
30006 jb 20f /* less then 8 bytes, go to byte copy loop */
30007@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30008 jnz 21b
30009 23: xor %eax,%eax
30010 ASM_CLAC
30011+ ASM_PAX_CLOSE_USERLAND
30012+ pax_force_retaddr
30013 ret
30014
30015 .section .fixup,"ax"
30016@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30017 */
30018 ENTRY(copy_user_generic_string)
30019 CFI_STARTPROC
30020+ ASM_PAX_OPEN_USERLAND
30021 ASM_STAC
30022 cmpl $8,%edx
30023 jb 2f /* less than 8 bytes, go to byte copy loop */
30024@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30025 movsb
30026 xorl %eax,%eax
30027 ASM_CLAC
30028+ ASM_PAX_CLOSE_USERLAND
30029+ pax_force_retaddr
30030 ret
30031
30032 .section .fixup,"ax"
30033@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30034 */
30035 ENTRY(copy_user_enhanced_fast_string)
30036 CFI_STARTPROC
30037+ ASM_PAX_OPEN_USERLAND
30038 ASM_STAC
30039 movl %edx,%ecx
30040 1: rep
30041 movsb
30042 xorl %eax,%eax
30043 ASM_CLAC
30044+ ASM_PAX_CLOSE_USERLAND
30045+ pax_force_retaddr
30046 ret
30047
30048 .section .fixup,"ax"
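copy_user_64.S drops the hand-written _copy_to_user/_copy_from_user entry points together with the ALTERNATIVE_JUMP dispatch macro (x8664_ksyms_64.c above already stopped exporting them); the limit checks they encoded are presumably re-done in the C uaccess wrappers elsewhere in the patch, where the size-overflow instrumentation can see them. The surviving copy variants are bracketed with ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND and end in pax_force_retaddr. The deleted stubs' range check, rendered as overflow-safe C:

    /* sketch: the check the removed asm did as `add; jc bad;
     * cmp TI_addr_limit; ja bad` (gcc 5+/clang builtin used here) */
    static int user_range_ok(unsigned long addr, unsigned long len,
                             unsigned long limit)
    {
            unsigned long end;

            if (__builtin_add_overflow(addr, len, &end))    /* the jc */
                    return 0;
            return end <= limit;                            /* the cmp/ja */
    }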
30049diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30050index 6a4f43c..c70fb52 100644
30051--- a/arch/x86/lib/copy_user_nocache_64.S
30052+++ b/arch/x86/lib/copy_user_nocache_64.S
30053@@ -8,6 +8,7 @@
30054
30055 #include <linux/linkage.h>
30056 #include <asm/dwarf2.h>
30057+#include <asm/alternative-asm.h>
30058
30059 #define FIX_ALIGNMENT 1
30060
30061@@ -16,6 +17,7 @@
30062 #include <asm/thread_info.h>
30063 #include <asm/asm.h>
30064 #include <asm/smap.h>
30065+#include <asm/pgtable.h>
30066
30067 .macro ALIGN_DESTINATION
30068 #ifdef FIX_ALIGNMENT
30069@@ -49,6 +51,16 @@
30070 */
30071 ENTRY(__copy_user_nocache)
30072 CFI_STARTPROC
30073+
30074+#ifdef CONFIG_PAX_MEMORY_UDEREF
30075+ mov pax_user_shadow_base,%rcx
30076+ cmp %rcx,%rsi
30077+ jae 1f
30078+ add %rcx,%rsi
30079+1:
30080+#endif
30081+
30082+ ASM_PAX_OPEN_USERLAND
30083 ASM_STAC
30084 cmpl $8,%edx
30085 jb 20f /* less then 8 bytes, go to byte copy loop */
30086@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30087 jnz 21b
30088 23: xorl %eax,%eax
30089 ASM_CLAC
30090+ ASM_PAX_CLOSE_USERLAND
30091 sfence
30092+ pax_force_retaddr
30093 ret
30094
30095 .section .fixup,"ax"
30096diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30097index 2419d5f..fe52d0e 100644
30098--- a/arch/x86/lib/csum-copy_64.S
30099+++ b/arch/x86/lib/csum-copy_64.S
30100@@ -9,6 +9,7 @@
30101 #include <asm/dwarf2.h>
30102 #include <asm/errno.h>
30103 #include <asm/asm.h>
30104+#include <asm/alternative-asm.h>
30105
30106 /*
30107 * Checksum copy with exception handling.
30108@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30109 CFI_ADJUST_CFA_OFFSET 7*8
30110 movq %rbx, 2*8(%rsp)
30111 CFI_REL_OFFSET rbx, 2*8
30112- movq %r12, 3*8(%rsp)
30113- CFI_REL_OFFSET r12, 3*8
30114+ movq %r15, 3*8(%rsp)
30115+ CFI_REL_OFFSET r15, 3*8
30116 movq %r14, 4*8(%rsp)
30117 CFI_REL_OFFSET r14, 4*8
30118 movq %r13, 5*8(%rsp)
30119@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30120 movl %edx, %ecx
30121
30122 xorl %r9d, %r9d
30123- movq %rcx, %r12
30124+ movq %rcx, %r15
30125
30126- shrq $6, %r12
30127+ shrq $6, %r15
30128 jz .Lhandle_tail /* < 64 */
30129
30130 clc
30131
30132 /* main loop. clear in 64 byte blocks */
30133 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30134- /* r11: temp3, rdx: temp4, r12 loopcnt */
30135+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30136 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30137 .p2align 4
30138 .Lloop:
30139@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30140 adcq %r14, %rax
30141 adcq %r13, %rax
30142
30143- decl %r12d
30144+ decl %r15d
30145
30146 dest
30147 movq %rbx, (%rsi)
30148@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30149 .Lende:
30150 movq 2*8(%rsp), %rbx
30151 CFI_RESTORE rbx
30152- movq 3*8(%rsp), %r12
30153- CFI_RESTORE r12
30154+ movq 3*8(%rsp), %r15
30155+ CFI_RESTORE r15
30156 movq 4*8(%rsp), %r14
30157 CFI_RESTORE r14
30158 movq 5*8(%rsp), %r13
30159@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30160 CFI_RESTORE rbp
30161 addq $7*8, %rsp
30162 CFI_ADJUST_CFA_OFFSET -7*8
30163+ pax_force_retaddr
30164 ret
30165 CFI_RESTORE_STATE
30166
30167diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30168index 1318f75..44c30fd 100644
30169--- a/arch/x86/lib/csum-wrappers_64.c
30170+++ b/arch/x86/lib/csum-wrappers_64.c
30171@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30172 len -= 2;
30173 }
30174 }
30175+ pax_open_userland();
30176 stac();
30177- isum = csum_partial_copy_generic((__force const void *)src,
30178+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30179 dst, len, isum, errp, NULL);
30180 clac();
30181+ pax_close_userland();
30182 if (unlikely(*errp))
30183 goto out_err;
30184
30185@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30186 }
30187
30188 *errp = 0;
30189+ pax_open_userland();
30190 stac();
30191- ret = csum_partial_copy_generic(src, (void __force *)dst,
30192+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30193 len, isum, NULL, errp);
30194 clac();
30195+ pax_close_userland();
30196 return ret;
30197 }
30198 EXPORT_SYMBOL(csum_partial_copy_to_user);
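The C wrappers now bracket the low-level checksum copy twice: pax_open_userland()/pax_close_userland() for UDEREF around the existing stac()/clac() pair for SMAP, with the user pointer laundered through ____m() and a __force_kernel cast that documents the deliberate address-space crossing to sparse. A sketch of the general bracketing pattern (pax_open_userland() and ____m() are the patch's own; stac()/clac() come from <asm/smap.h>):

    #include <linux/uaccess.h>
    #include <asm/smap.h>

    /* sketch: every intentional userland access opens both gates and
     * closes them again in reverse order */
    static int checked_user_read(void *dst, const void __user *src, size_t len)
    {
            unsigned long left;

            pax_open_userland();    /* UDEREF: make userland addressable */
            stac();                 /* SMAP: permit supervisor access */
            left = __copy_from_user(dst, src, len);
            clac();
            pax_close_userland();
            return left ? -EFAULT : 0;
    }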
30199diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30200index a451235..a74bfa3 100644
30201--- a/arch/x86/lib/getuser.S
30202+++ b/arch/x86/lib/getuser.S
30203@@ -33,17 +33,40 @@
30204 #include <asm/thread_info.h>
30205 #include <asm/asm.h>
30206 #include <asm/smap.h>
30207+#include <asm/segment.h>
30208+#include <asm/pgtable.h>
30209+#include <asm/alternative-asm.h>
30210+
30211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30212+#define __copyuser_seg gs;
30213+#else
30214+#define __copyuser_seg
30215+#endif
30216
30217 .text
30218 ENTRY(__get_user_1)
30219 CFI_STARTPROC
30220+
30221+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30222 GET_THREAD_INFO(%_ASM_DX)
30223 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30224 jae bad_get_user
30225+
30226+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30227+ mov pax_user_shadow_base,%_ASM_DX
30228+ cmp %_ASM_DX,%_ASM_AX
30229+ jae 1234f
30230+ add %_ASM_DX,%_ASM_AX
30231+1234:
30232+#endif
30233+
30234+#endif
30235+
30236 ASM_STAC
30237-1: movzbl (%_ASM_AX),%edx
30238+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30239 xor %eax,%eax
30240 ASM_CLAC
30241+ pax_force_retaddr
30242 ret
30243 CFI_ENDPROC
30244 ENDPROC(__get_user_1)
30245@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30246 ENTRY(__get_user_2)
30247 CFI_STARTPROC
30248 add $1,%_ASM_AX
30249+
30250+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30251 jc bad_get_user
30252 GET_THREAD_INFO(%_ASM_DX)
30253 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30254 jae bad_get_user
30255+
30256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30257+ mov pax_user_shadow_base,%_ASM_DX
30258+ cmp %_ASM_DX,%_ASM_AX
30259+ jae 1234f
30260+ add %_ASM_DX,%_ASM_AX
30261+1234:
30262+#endif
30263+
30264+#endif
30265+
30266 ASM_STAC
30267-2: movzwl -1(%_ASM_AX),%edx
30268+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30269 xor %eax,%eax
30270 ASM_CLAC
30271+ pax_force_retaddr
30272 ret
30273 CFI_ENDPROC
30274 ENDPROC(__get_user_2)
30275@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30276 ENTRY(__get_user_4)
30277 CFI_STARTPROC
30278 add $3,%_ASM_AX
30279+
30280+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30281 jc bad_get_user
30282 GET_THREAD_INFO(%_ASM_DX)
30283 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30284 jae bad_get_user
30285+
30286+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30287+ mov pax_user_shadow_base,%_ASM_DX
30288+ cmp %_ASM_DX,%_ASM_AX
30289+ jae 1234f
30290+ add %_ASM_DX,%_ASM_AX
30291+1234:
30292+#endif
30293+
30294+#endif
30295+
30296 ASM_STAC
30297-3: movl -3(%_ASM_AX),%edx
30298+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30299 xor %eax,%eax
30300 ASM_CLAC
30301+ pax_force_retaddr
30302 ret
30303 CFI_ENDPROC
30304 ENDPROC(__get_user_4)
30305@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30306 GET_THREAD_INFO(%_ASM_DX)
30307 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30308 jae bad_get_user
30309+
30310+#ifdef CONFIG_PAX_MEMORY_UDEREF
30311+ mov pax_user_shadow_base,%_ASM_DX
30312+ cmp %_ASM_DX,%_ASM_AX
30313+ jae 1234f
30314+ add %_ASM_DX,%_ASM_AX
30315+1234:
30316+#endif
30317+
30318 ASM_STAC
30319 4: movq -7(%_ASM_AX),%rdx
30320 xor %eax,%eax
30321 ASM_CLAC
30322+ pax_force_retaddr
30323 ret
30324 #else
30325 add $7,%_ASM_AX
30326@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30327 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30328 jae bad_get_user_8
30329 ASM_STAC
30330-4: movl -7(%_ASM_AX),%edx
30331-5: movl -3(%_ASM_AX),%ecx
30332+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30333+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30334 xor %eax,%eax
30335 ASM_CLAC
30336+ pax_force_retaddr
30337 ret
30338 #endif
30339 CFI_ENDPROC
30340@@ -113,6 +175,7 @@ bad_get_user:
30341 xor %edx,%edx
30342 mov $(-EFAULT),%_ASM_AX
30343 ASM_CLAC
30344+ pax_force_retaddr
30345 ret
30346 CFI_ENDPROC
30347 END(bad_get_user)
30348@@ -124,6 +187,7 @@ bad_get_user_8:
30349 xor %ecx,%ecx
30350 mov $(-EFAULT),%_ASM_AX
30351 ASM_CLAC
30352+ pax_force_retaddr
30353 ret
30354 CFI_ENDPROC
30355 END(bad_get_user_8)
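
The get_user stubs above all follow one UDEREF pattern. On amd64, after the usual addr_limit check, the pointer is compared against pax_user_shadow_base and, if below it, shifted up by that base, so the load goes through the shadow mapping of userland. On i386 with UDEREF the addr_limit check is compiled out entirely and isolation comes from the %gs override (__copyuser_seg) instead. A minimal C sketch of the check-and-remap, with shadow_base standing in for the kernel's pax_user_shadow_base (defined elsewhere in this patch):

    #include <stdint.h>

    /* Mirrors the "cmp %_ASM_DX,%_ASM_AX; jae 1234f; add %_ASM_DX,%_ASM_AX"
     * sequence: pointers below the shadow base are relocated into the
     * shadow mapping, pointers at or above it are used unchanged.
     */
    static inline uintptr_t uderef_remap(uintptr_t uaddr, uintptr_t shadow_base)
    {
            if (uaddr < shadow_base)        /* jae 1234f not taken */
                    uaddr += shadow_base;   /* add %_ASM_DX,%_ASM_AX */
            return uaddr;
    }
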
30356diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30357index 1313ae6..84f25ea 100644
30358--- a/arch/x86/lib/insn.c
30359+++ b/arch/x86/lib/insn.c
30360@@ -20,8 +20,10 @@
30361
30362 #ifdef __KERNEL__
30363 #include <linux/string.h>
30364+#include <asm/pgtable_types.h>
30365 #else
30366 #include <string.h>
30367+#define ktla_ktva(addr) addr
30368 #endif
30369 #include <asm/inat.h>
30370 #include <asm/insn.h>
30371@@ -53,9 +55,9 @@
30372 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30373 {
30374 memset(insn, 0, sizeof(*insn));
30375- insn->kaddr = kaddr;
30376- insn->end_kaddr = kaddr + buf_len;
30377- insn->next_byte = kaddr;
30378+ insn->kaddr = ktla_ktva(kaddr);
30379+ insn->end_kaddr = insn->kaddr + buf_len;
30380+ insn->next_byte = insn->kaddr;
30381 insn->x86_64 = x86_64 ? 1 : 0;
30382 insn->opnd_bytes = 4;
30383 if (x86_64)
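
The insn.c hunk is as much a consistency fix as a feature: once kaddr passes through ktla_ktva() (an identity macro in the userspace build, as added above; under KERNEXEC in the kernel it remaps a text address, per header changes elsewhere in this patch), end_kaddr and next_byte must derive from the remapped value, not the raw argument, or the decoder would mix two aliases of the same buffer. A toy model of the invariant, with delta standing in for whatever ktla_ktva() adds:

    #include <stdint.h>
    #include <stddef.h>

    /* delta is zero outside KERNEXEC builds; deriving end_kaddr or
     * next_byte from the raw kaddr instead of insn->kaddr would be
     * off by exactly delta.
     */
    struct insn_sketch {
            const uint8_t *kaddr, *end_kaddr, *next_byte;
    };

    static void insn_init_sketch(struct insn_sketch *insn, const uint8_t *kaddr,
                                 size_t buf_len, ptrdiff_t delta)
    {
            insn->kaddr     = kaddr + delta;          /* plays ktla_ktva(kaddr) */
            insn->end_kaddr = insn->kaddr + buf_len;  /* from the remapped base */
            insn->next_byte = insn->kaddr;
    }
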
30384diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30385index 05a95e7..326f2fa 100644
30386--- a/arch/x86/lib/iomap_copy_64.S
30387+++ b/arch/x86/lib/iomap_copy_64.S
30388@@ -17,6 +17,7 @@
30389
30390 #include <linux/linkage.h>
30391 #include <asm/dwarf2.h>
30392+#include <asm/alternative-asm.h>
30393
30394 /*
30395 * override generic version in lib/iomap_copy.c
30396@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30397 CFI_STARTPROC
30398 movl %edx,%ecx
30399 rep movsd
30400+ pax_force_retaddr
30401 ret
30402 CFI_ENDPROC
30403 ENDPROC(__iowrite32_copy)
30404diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30405index 56313a3..0db417e 100644
30406--- a/arch/x86/lib/memcpy_64.S
30407+++ b/arch/x86/lib/memcpy_64.S
30408@@ -24,7 +24,7 @@
30409 * This gets patched over the unrolled variant (below) via the
30410 * alternative instructions framework:
30411 */
30412- .section .altinstr_replacement, "ax", @progbits
30413+ .section .altinstr_replacement, "a", @progbits
30414 .Lmemcpy_c:
30415 movq %rdi, %rax
30416 movq %rdx, %rcx
30417@@ -33,6 +33,7 @@
30418 rep movsq
30419 movl %edx, %ecx
30420 rep movsb
30421+ pax_force_retaddr
30422 ret
30423 .Lmemcpy_e:
30424 .previous
30425@@ -44,11 +45,12 @@
30426 * This gets patched over the unrolled variant (below) via the
30427 * alternative instructions framework:
30428 */
30429- .section .altinstr_replacement, "ax", @progbits
30430+ .section .altinstr_replacement, "a", @progbits
30431 .Lmemcpy_c_e:
30432 movq %rdi, %rax
30433 movq %rdx, %rcx
30434 rep movsb
30435+ pax_force_retaddr
30436 ret
30437 .Lmemcpy_e_e:
30438 .previous
30439@@ -136,6 +138,7 @@ ENTRY(memcpy)
30440 movq %r9, 1*8(%rdi)
30441 movq %r10, -2*8(%rdi, %rdx)
30442 movq %r11, -1*8(%rdi, %rdx)
30443+ pax_force_retaddr
30444 retq
30445 .p2align 4
30446 .Lless_16bytes:
30447@@ -148,6 +151,7 @@ ENTRY(memcpy)
30448 movq -1*8(%rsi, %rdx), %r9
30449 movq %r8, 0*8(%rdi)
30450 movq %r9, -1*8(%rdi, %rdx)
30451+ pax_force_retaddr
30452 retq
30453 .p2align 4
30454 .Lless_8bytes:
30455@@ -161,6 +165,7 @@ ENTRY(memcpy)
30456 movl -4(%rsi, %rdx), %r8d
30457 movl %ecx, (%rdi)
30458 movl %r8d, -4(%rdi, %rdx)
30459+ pax_force_retaddr
30460 retq
30461 .p2align 4
30462 .Lless_3bytes:
30463@@ -179,6 +184,7 @@ ENTRY(memcpy)
30464 movb %cl, (%rdi)
30465
30466 .Lend:
30467+ pax_force_retaddr
30468 retq
30469 CFI_ENDPROC
30470 ENDPROC(memcpy)
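
Two recurring edits begin here and repeat through memmove_64.S and memset_64.S below. First, every ret gains a pax_force_retaddr; the macro comes from asm/alternative-asm.h as modified elsewhere in this patch, and judging by its name it sanitizes the saved return address just before each return executes (an inference, since its definition is outside this excerpt). Second, the .altinstr_replacement sections drop the executable flag ("ax" becomes "a"), which is sound because replacement instructions are never executed in place; the alternatives patcher only copies them over the original code, roughly:

    #include <string.h>

    /* Sketch, not the kernel's actual patcher: replacement bytes are
     * consumed as data by a memcpy-style fixup, so SHF_ALLOC ("a")
     * suffices for .altinstr_replacement.
     */
    static void apply_alternative_sketch(unsigned char *orig,
                                         const unsigned char *repl, size_t len)
    {
            memcpy(orig, repl, len);  /* repl is read here, never jumped to */
    }

So the section only ever needs to be mapped, not executable, which matters once KERNEXEC enforces W^X on kernel mappings.
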
30471diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30472index 65268a6..dd1de11 100644
30473--- a/arch/x86/lib/memmove_64.S
30474+++ b/arch/x86/lib/memmove_64.S
30475@@ -202,14 +202,16 @@ ENTRY(memmove)
30476 movb (%rsi), %r11b
30477 movb %r11b, (%rdi)
30478 13:
30479+ pax_force_retaddr
30480 retq
30481 CFI_ENDPROC
30482
30483- .section .altinstr_replacement,"ax"
30484+ .section .altinstr_replacement,"a"
30485 .Lmemmove_begin_forward_efs:
30486 /* Forward moving data. */
30487 movq %rdx, %rcx
30488 rep movsb
30489+ pax_force_retaddr
30490 retq
30491 .Lmemmove_end_forward_efs:
30492 .previous
30493diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30494index 2dcb380..2eb79fe 100644
30495--- a/arch/x86/lib/memset_64.S
30496+++ b/arch/x86/lib/memset_64.S
30497@@ -16,7 +16,7 @@
30498 *
30499 * rax original destination
30500 */
30501- .section .altinstr_replacement, "ax", @progbits
30502+ .section .altinstr_replacement, "a", @progbits
30503 .Lmemset_c:
30504 movq %rdi,%r9
30505 movq %rdx,%rcx
30506@@ -30,6 +30,7 @@
30507 movl %edx,%ecx
30508 rep stosb
30509 movq %r9,%rax
30510+ pax_force_retaddr
30511 ret
30512 .Lmemset_e:
30513 .previous
30514@@ -45,13 +46,14 @@
30515 *
30516 * rax original destination
30517 */
30518- .section .altinstr_replacement, "ax", @progbits
30519+ .section .altinstr_replacement, "a", @progbits
30520 .Lmemset_c_e:
30521 movq %rdi,%r9
30522 movb %sil,%al
30523 movq %rdx,%rcx
30524 rep stosb
30525 movq %r9,%rax
30526+ pax_force_retaddr
30527 ret
30528 .Lmemset_e_e:
30529 .previous
30530@@ -118,6 +120,7 @@ ENTRY(__memset)
30531
30532 .Lende:
30533 movq %r10,%rax
30534+ pax_force_retaddr
30535 ret
30536
30537 CFI_RESTORE_STATE
30538diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30539index c9f2d9b..e7fd2c0 100644
30540--- a/arch/x86/lib/mmx_32.c
30541+++ b/arch/x86/lib/mmx_32.c
30542@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30543 {
30544 void *p;
30545 int i;
30546+ unsigned long cr0;
30547
30548 if (unlikely(in_interrupt()))
30549 return __memcpy(to, from, len);
30550@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30551 kernel_fpu_begin();
30552
30553 __asm__ __volatile__ (
30554- "1: prefetch (%0)\n" /* This set is 28 bytes */
30555- " prefetch 64(%0)\n"
30556- " prefetch 128(%0)\n"
30557- " prefetch 192(%0)\n"
30558- " prefetch 256(%0)\n"
30559+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30560+ " prefetch 64(%1)\n"
30561+ " prefetch 128(%1)\n"
30562+ " prefetch 192(%1)\n"
30563+ " prefetch 256(%1)\n"
30564 "2: \n"
30565 ".section .fixup, \"ax\"\n"
30566- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30567+ "3: \n"
30568+
30569+#ifdef CONFIG_PAX_KERNEXEC
30570+ " movl %%cr0, %0\n"
30571+ " movl %0, %%eax\n"
30572+ " andl $0xFFFEFFFF, %%eax\n"
30573+ " movl %%eax, %%cr0\n"
30574+#endif
30575+
30576+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30577+
30578+#ifdef CONFIG_PAX_KERNEXEC
30579+ " movl %0, %%cr0\n"
30580+#endif
30581+
30582 " jmp 2b\n"
30583 ".previous\n"
30584 _ASM_EXTABLE(1b, 3b)
30585- : : "r" (from));
30586+ : "=&r" (cr0) : "r" (from) : "ax");
30587
30588 for ( ; i > 5; i--) {
30589 __asm__ __volatile__ (
30590- "1: prefetch 320(%0)\n"
30591- "2: movq (%0), %%mm0\n"
30592- " movq 8(%0), %%mm1\n"
30593- " movq 16(%0), %%mm2\n"
30594- " movq 24(%0), %%mm3\n"
30595- " movq %%mm0, (%1)\n"
30596- " movq %%mm1, 8(%1)\n"
30597- " movq %%mm2, 16(%1)\n"
30598- " movq %%mm3, 24(%1)\n"
30599- " movq 32(%0), %%mm0\n"
30600- " movq 40(%0), %%mm1\n"
30601- " movq 48(%0), %%mm2\n"
30602- " movq 56(%0), %%mm3\n"
30603- " movq %%mm0, 32(%1)\n"
30604- " movq %%mm1, 40(%1)\n"
30605- " movq %%mm2, 48(%1)\n"
30606- " movq %%mm3, 56(%1)\n"
30607+ "1: prefetch 320(%1)\n"
30608+ "2: movq (%1), %%mm0\n"
30609+ " movq 8(%1), %%mm1\n"
30610+ " movq 16(%1), %%mm2\n"
30611+ " movq 24(%1), %%mm3\n"
30612+ " movq %%mm0, (%2)\n"
30613+ " movq %%mm1, 8(%2)\n"
30614+ " movq %%mm2, 16(%2)\n"
30615+ " movq %%mm3, 24(%2)\n"
30616+ " movq 32(%1), %%mm0\n"
30617+ " movq 40(%1), %%mm1\n"
30618+ " movq 48(%1), %%mm2\n"
30619+ " movq 56(%1), %%mm3\n"
30620+ " movq %%mm0, 32(%2)\n"
30621+ " movq %%mm1, 40(%2)\n"
30622+ " movq %%mm2, 48(%2)\n"
30623+ " movq %%mm3, 56(%2)\n"
30624 ".section .fixup, \"ax\"\n"
30625- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30626+ "3:\n"
30627+
30628+#ifdef CONFIG_PAX_KERNEXEC
30629+ " movl %%cr0, %0\n"
30630+ " movl %0, %%eax\n"
30631+ " andl $0xFFFEFFFF, %%eax\n"
30632+ " movl %%eax, %%cr0\n"
30633+#endif
30634+
30635+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30636+
30637+#ifdef CONFIG_PAX_KERNEXEC
30638+ " movl %0, %%cr0\n"
30639+#endif
30640+
30641 " jmp 2b\n"
30642 ".previous\n"
30643 _ASM_EXTABLE(1b, 3b)
30644- : : "r" (from), "r" (to) : "memory");
30645+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30646
30647 from += 64;
30648 to += 64;
30649@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30650 static void fast_copy_page(void *to, void *from)
30651 {
30652 int i;
30653+ unsigned long cr0;
30654
30655 kernel_fpu_begin();
30656
30657@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30658 * but that is for later. -AV
30659 */
30660 __asm__ __volatile__(
30661- "1: prefetch (%0)\n"
30662- " prefetch 64(%0)\n"
30663- " prefetch 128(%0)\n"
30664- " prefetch 192(%0)\n"
30665- " prefetch 256(%0)\n"
30666+ "1: prefetch (%1)\n"
30667+ " prefetch 64(%1)\n"
30668+ " prefetch 128(%1)\n"
30669+ " prefetch 192(%1)\n"
30670+ " prefetch 256(%1)\n"
30671 "2: \n"
30672 ".section .fixup, \"ax\"\n"
30673- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30674+ "3: \n"
30675+
30676+#ifdef CONFIG_PAX_KERNEXEC
30677+ " movl %%cr0, %0\n"
30678+ " movl %0, %%eax\n"
30679+ " andl $0xFFFEFFFF, %%eax\n"
30680+ " movl %%eax, %%cr0\n"
30681+#endif
30682+
30683+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30684+
30685+#ifdef CONFIG_PAX_KERNEXEC
30686+ " movl %0, %%cr0\n"
30687+#endif
30688+
30689 " jmp 2b\n"
30690 ".previous\n"
30691- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30692+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30693
30694 for (i = 0; i < (4096-320)/64; i++) {
30695 __asm__ __volatile__ (
30696- "1: prefetch 320(%0)\n"
30697- "2: movq (%0), %%mm0\n"
30698- " movntq %%mm0, (%1)\n"
30699- " movq 8(%0), %%mm1\n"
30700- " movntq %%mm1, 8(%1)\n"
30701- " movq 16(%0), %%mm2\n"
30702- " movntq %%mm2, 16(%1)\n"
30703- " movq 24(%0), %%mm3\n"
30704- " movntq %%mm3, 24(%1)\n"
30705- " movq 32(%0), %%mm4\n"
30706- " movntq %%mm4, 32(%1)\n"
30707- " movq 40(%0), %%mm5\n"
30708- " movntq %%mm5, 40(%1)\n"
30709- " movq 48(%0), %%mm6\n"
30710- " movntq %%mm6, 48(%1)\n"
30711- " movq 56(%0), %%mm7\n"
30712- " movntq %%mm7, 56(%1)\n"
30713+ "1: prefetch 320(%1)\n"
30714+ "2: movq (%1), %%mm0\n"
30715+ " movntq %%mm0, (%2)\n"
30716+ " movq 8(%1), %%mm1\n"
30717+ " movntq %%mm1, 8(%2)\n"
30718+ " movq 16(%1), %%mm2\n"
30719+ " movntq %%mm2, 16(%2)\n"
30720+ " movq 24(%1), %%mm3\n"
30721+ " movntq %%mm3, 24(%2)\n"
30722+ " movq 32(%1), %%mm4\n"
30723+ " movntq %%mm4, 32(%2)\n"
30724+ " movq 40(%1), %%mm5\n"
30725+ " movntq %%mm5, 40(%2)\n"
30726+ " movq 48(%1), %%mm6\n"
30727+ " movntq %%mm6, 48(%2)\n"
30728+ " movq 56(%1), %%mm7\n"
30729+ " movntq %%mm7, 56(%2)\n"
30730 ".section .fixup, \"ax\"\n"
30731- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30732+ "3:\n"
30733+
30734+#ifdef CONFIG_PAX_KERNEXEC
30735+ " movl %%cr0, %0\n"
30736+ " movl %0, %%eax\n"
30737+ " andl $0xFFFEFFFF, %%eax\n"
30738+ " movl %%eax, %%cr0\n"
30739+#endif
30740+
30741+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30742+
30743+#ifdef CONFIG_PAX_KERNEXEC
30744+ " movl %0, %%cr0\n"
30745+#endif
30746+
30747 " jmp 2b\n"
30748 ".previous\n"
30749- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30750+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30751
30752 from += 64;
30753 to += 64;
30754@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30755 static void fast_copy_page(void *to, void *from)
30756 {
30757 int i;
30758+ unsigned long cr0;
30759
30760 kernel_fpu_begin();
30761
30762 __asm__ __volatile__ (
30763- "1: prefetch (%0)\n"
30764- " prefetch 64(%0)\n"
30765- " prefetch 128(%0)\n"
30766- " prefetch 192(%0)\n"
30767- " prefetch 256(%0)\n"
30768+ "1: prefetch (%1)\n"
30769+ " prefetch 64(%1)\n"
30770+ " prefetch 128(%1)\n"
30771+ " prefetch 192(%1)\n"
30772+ " prefetch 256(%1)\n"
30773 "2: \n"
30774 ".section .fixup, \"ax\"\n"
30775- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30776+ "3: \n"
30777+
30778+#ifdef CONFIG_PAX_KERNEXEC
30779+ " movl %%cr0, %0\n"
30780+ " movl %0, %%eax\n"
30781+ " andl $0xFFFEFFFF, %%eax\n"
30782+ " movl %%eax, %%cr0\n"
30783+#endif
30784+
30785+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30786+
30787+#ifdef CONFIG_PAX_KERNEXEC
30788+ " movl %0, %%cr0\n"
30789+#endif
30790+
30791 " jmp 2b\n"
30792 ".previous\n"
30793- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30794+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30795
30796 for (i = 0; i < 4096/64; i++) {
30797 __asm__ __volatile__ (
30798- "1: prefetch 320(%0)\n"
30799- "2: movq (%0), %%mm0\n"
30800- " movq 8(%0), %%mm1\n"
30801- " movq 16(%0), %%mm2\n"
30802- " movq 24(%0), %%mm3\n"
30803- " movq %%mm0, (%1)\n"
30804- " movq %%mm1, 8(%1)\n"
30805- " movq %%mm2, 16(%1)\n"
30806- " movq %%mm3, 24(%1)\n"
30807- " movq 32(%0), %%mm0\n"
30808- " movq 40(%0), %%mm1\n"
30809- " movq 48(%0), %%mm2\n"
30810- " movq 56(%0), %%mm3\n"
30811- " movq %%mm0, 32(%1)\n"
30812- " movq %%mm1, 40(%1)\n"
30813- " movq %%mm2, 48(%1)\n"
30814- " movq %%mm3, 56(%1)\n"
30815+ "1: prefetch 320(%1)\n"
30816+ "2: movq (%1), %%mm0\n"
30817+ " movq 8(%1), %%mm1\n"
30818+ " movq 16(%1), %%mm2\n"
30819+ " movq 24(%1), %%mm3\n"
30820+ " movq %%mm0, (%2)\n"
30821+ " movq %%mm1, 8(%2)\n"
30822+ " movq %%mm2, 16(%2)\n"
30823+ " movq %%mm3, 24(%2)\n"
30824+ " movq 32(%1), %%mm0\n"
30825+ " movq 40(%1), %%mm1\n"
30826+ " movq 48(%1), %%mm2\n"
30827+ " movq 56(%1), %%mm3\n"
30828+ " movq %%mm0, 32(%2)\n"
30829+ " movq %%mm1, 40(%2)\n"
30830+ " movq %%mm2, 48(%2)\n"
30831+ " movq %%mm3, 56(%2)\n"
30832 ".section .fixup, \"ax\"\n"
30833- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30834+ "3:\n"
30835+
30836+#ifdef CONFIG_PAX_KERNEXEC
30837+ " movl %%cr0, %0\n"
30838+ " movl %0, %%eax\n"
30839+ " andl $0xFFFEFFFF, %%eax\n"
30840+ " movl %%eax, %%cr0\n"
30841+#endif
30842+
30843+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30844+
30845+#ifdef CONFIG_PAX_KERNEXEC
30846+ " movl %0, %%cr0\n"
30847+#endif
30848+
30849 " jmp 2b\n"
30850 ".previous\n"
30851 _ASM_EXTABLE(1b, 3b)
30852- : : "r" (from), "r" (to) : "memory");
30853+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30854
30855 from += 64;
30856 to += 64;
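
All four mmx_32.c hunks share a template. Each .fixup handler patches a two-byte short jmp (0x1AEB encodes "jmp +26", 0x05EB "jmp +5", stored little-endian) over the faulting prefetch, and that is a write into kernel .text, which CONFIG_PAX_KERNEXEC keeps write-protected. The fixup therefore brackets the write with a CR0.WP toggle, which is also why every asm gains a cr0 output ("=&r"), an eax clobber, and the new local variable. A sketch of the WP window (bit 16 of CR0 is Write Protect; the asm masks it with andl $0xFFFEFFFF):

    /* Real kernel code must keep this window free of preemption and
     * interrupts; this is illustration, not a drop-in helper.
     */
    static inline unsigned long wp_open(void)
    {
            unsigned long cr0;

            asm volatile("mov %%cr0, %0" : "=r" (cr0));
            asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~(1UL << 16)));
            return cr0;  /* caller restores the saved value */
    }

    static inline void wp_close(unsigned long cr0)
    {
            asm volatile("mov %0, %%cr0" : : "r" (cr0));
    }
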
30857diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30858index f6d13ee..d789440 100644
30859--- a/arch/x86/lib/msr-reg.S
30860+++ b/arch/x86/lib/msr-reg.S
30861@@ -3,6 +3,7 @@
30862 #include <asm/dwarf2.h>
30863 #include <asm/asm.h>
30864 #include <asm/msr.h>
30865+#include <asm/alternative-asm.h>
30866
30867 #ifdef CONFIG_X86_64
30868 /*
30869@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30870 movl %edi, 28(%r10)
30871 popq_cfi %rbp
30872 popq_cfi %rbx
30873+ pax_force_retaddr
30874 ret
30875 3:
30876 CFI_RESTORE_STATE
30877diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30878index fc6ba17..14ad9a5 100644
30879--- a/arch/x86/lib/putuser.S
30880+++ b/arch/x86/lib/putuser.S
30881@@ -16,7 +16,9 @@
30882 #include <asm/errno.h>
30883 #include <asm/asm.h>
30884 #include <asm/smap.h>
30885-
30886+#include <asm/segment.h>
30887+#include <asm/pgtable.h>
30888+#include <asm/alternative-asm.h>
30889
30890 /*
30891 * __put_user_X
30892@@ -30,57 +32,125 @@
30893 * as they get called from within inline assembly.
30894 */
30895
30896-#define ENTER CFI_STARTPROC ; \
30897- GET_THREAD_INFO(%_ASM_BX)
30898-#define EXIT ASM_CLAC ; \
30899- ret ; \
30900+#define ENTER CFI_STARTPROC
30901+#define EXIT ASM_CLAC ; \
30902+ pax_force_retaddr ; \
30903+ ret ; \
30904 CFI_ENDPROC
30905
30906+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30907+#define _DEST %_ASM_CX,%_ASM_BX
30908+#else
30909+#define _DEST %_ASM_CX
30910+#endif
30911+
30912+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30913+#define __copyuser_seg gs;
30914+#else
30915+#define __copyuser_seg
30916+#endif
30917+
30918 .text
30919 ENTRY(__put_user_1)
30920 ENTER
30921+
30922+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30923+ GET_THREAD_INFO(%_ASM_BX)
30924 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30925 jae bad_put_user
30926+
30927+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30928+ mov pax_user_shadow_base,%_ASM_BX
30929+ cmp %_ASM_BX,%_ASM_CX
30930+ jb 1234f
30931+ xor %ebx,%ebx
30932+1234:
30933+#endif
30934+
30935+#endif
30936+
30937 ASM_STAC
30938-1: movb %al,(%_ASM_CX)
30939+1: __copyuser_seg movb %al,(_DEST)
30940 xor %eax,%eax
30941 EXIT
30942 ENDPROC(__put_user_1)
30943
30944 ENTRY(__put_user_2)
30945 ENTER
30946+
30947+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30948+ GET_THREAD_INFO(%_ASM_BX)
30949 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30950 sub $1,%_ASM_BX
30951 cmp %_ASM_BX,%_ASM_CX
30952 jae bad_put_user
30953+
30954+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30955+ mov pax_user_shadow_base,%_ASM_BX
30956+ cmp %_ASM_BX,%_ASM_CX
30957+ jb 1234f
30958+ xor %ebx,%ebx
30959+1234:
30960+#endif
30961+
30962+#endif
30963+
30964 ASM_STAC
30965-2: movw %ax,(%_ASM_CX)
30966+2: __copyuser_seg movw %ax,(_DEST)
30967 xor %eax,%eax
30968 EXIT
30969 ENDPROC(__put_user_2)
30970
30971 ENTRY(__put_user_4)
30972 ENTER
30973+
30974+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30975+ GET_THREAD_INFO(%_ASM_BX)
30976 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30977 sub $3,%_ASM_BX
30978 cmp %_ASM_BX,%_ASM_CX
30979 jae bad_put_user
30980+
30981+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30982+ mov pax_user_shadow_base,%_ASM_BX
30983+ cmp %_ASM_BX,%_ASM_CX
30984+ jb 1234f
30985+ xor %ebx,%ebx
30986+1234:
30987+#endif
30988+
30989+#endif
30990+
30991 ASM_STAC
30992-3: movl %eax,(%_ASM_CX)
30993+3: __copyuser_seg movl %eax,(_DEST)
30994 xor %eax,%eax
30995 EXIT
30996 ENDPROC(__put_user_4)
30997
30998 ENTRY(__put_user_8)
30999 ENTER
31000+
31001+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31002+ GET_THREAD_INFO(%_ASM_BX)
31003 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31004 sub $7,%_ASM_BX
31005 cmp %_ASM_BX,%_ASM_CX
31006 jae bad_put_user
31007+
31008+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31009+ mov pax_user_shadow_base,%_ASM_BX
31010+ cmp %_ASM_BX,%_ASM_CX
31011+ jb 1234f
31012+ xor %ebx,%ebx
31013+1234:
31014+#endif
31015+
31016+#endif
31017+
31018 ASM_STAC
31019-4: mov %_ASM_AX,(%_ASM_CX)
31020+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31021 #ifdef CONFIG_X86_32
31022-5: movl %edx,4(%_ASM_CX)
31023+5: __copyuser_seg movl %edx,4(_DEST)
31024 #endif
31025 xor %eax,%eax
31026 EXIT
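
putuser.S receives the same treatment as getuser.S with one difference in technique: instead of rewriting the pointer in place, the stores go through a base+index operand, _DEST expanding to (%_ASM_CX,%_ASM_BX), where %_ASM_BX ends up holding either the shadow offset (pointer below pax_user_shadow_base) or zero after the "jb 1234f; xor %ebx,%ebx" sequence, leaving the original user pointer in %_ASM_CX untouched. The effective address it computes, with shadow_base again standing in for pax_user_shadow_base:

    #include <stdint.h>

    static inline void put_user_4_sketch(uint32_t val, uintptr_t uaddr,
                                         uintptr_t shadow_base)
    {
            uintptr_t index = (uaddr < shadow_base) ? shadow_base : 0;

            *(volatile uint32_t *)(uaddr + index) = val;  /* movl %eax,(_DEST) */
    }
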
31027diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31028index 5dff5f0..cadebf4 100644
31029--- a/arch/x86/lib/rwsem.S
31030+++ b/arch/x86/lib/rwsem.S
31031@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31032 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31033 CFI_RESTORE __ASM_REG(dx)
31034 restore_common_regs
31035+ pax_force_retaddr
31036 ret
31037 CFI_ENDPROC
31038 ENDPROC(call_rwsem_down_read_failed)
31039@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31040 movq %rax,%rdi
31041 call rwsem_down_write_failed
31042 restore_common_regs
31043+ pax_force_retaddr
31044 ret
31045 CFI_ENDPROC
31046 ENDPROC(call_rwsem_down_write_failed)
31047@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31048 movq %rax,%rdi
31049 call rwsem_wake
31050 restore_common_regs
31051-1: ret
31052+1: pax_force_retaddr
31053+ ret
31054 CFI_ENDPROC
31055 ENDPROC(call_rwsem_wake)
31056
31057@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31058 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31059 CFI_RESTORE __ASM_REG(dx)
31060 restore_common_regs
31061+ pax_force_retaddr
31062 ret
31063 CFI_ENDPROC
31064 ENDPROC(call_rwsem_downgrade_wake)
31065diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31066index b30b5eb..2b57052 100644
31067--- a/arch/x86/lib/thunk_64.S
31068+++ b/arch/x86/lib/thunk_64.S
31069@@ -9,6 +9,7 @@
31070 #include <asm/dwarf2.h>
31071 #include <asm/calling.h>
31072 #include <asm/asm.h>
31073+#include <asm/alternative-asm.h>
31074
31075 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31076 .macro THUNK name, func, put_ret_addr_in_rdi=0
31077@@ -16,11 +17,11 @@
31078 \name:
31079 CFI_STARTPROC
31080
31081- /* this one pushes 9 elems, the next one would be %rIP */
31082- SAVE_ARGS
31083+ /* this one pushes 15+1 elems, the next one would be %rIP */
31084+ SAVE_ARGS 8
31085
31086 .if \put_ret_addr_in_rdi
31087- movq_cfi_restore 9*8, rdi
31088+ movq_cfi_restore RIP, rdi
31089 .endif
31090
31091 call \func
31092@@ -47,9 +48,10 @@
31093
31094 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31095 CFI_STARTPROC
31096- SAVE_ARGS
31097+ SAVE_ARGS 8
31098 restore:
31099- RESTORE_ARGS
31100+ RESTORE_ARGS 1,8
31101+ pax_force_retaddr
31102 ret
31103 CFI_ENDPROC
31104 _ASM_NOKPROBE(restore)
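
The thunk_64.S change follows from a SAVE_ARGS rewrite elsewhere in this patch: per the updated comment, the macro now pushes 15+1 elements (the full register set plus the 8 bytes of skip passed as SAVE_ARGS 8) instead of 9, so the caller's return address is no longer at the hard-coded 9*8 offset and is restored from the symbolic RIP slot instead, with RESTORE_ARGS 1,8 unwinding the matching layout. Treat the slot arithmetic as an inference from the comment; the macro bodies live in asm/calling.h, outside this excerpt.
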
31105diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31106index e2f5e21..4b22130 100644
31107--- a/arch/x86/lib/usercopy_32.c
31108+++ b/arch/x86/lib/usercopy_32.c
31109@@ -42,11 +42,13 @@ do { \
31110 int __d0; \
31111 might_fault(); \
31112 __asm__ __volatile__( \
31113+ __COPYUSER_SET_ES \
31114 ASM_STAC "\n" \
31115 "0: rep; stosl\n" \
31116 " movl %2,%0\n" \
31117 "1: rep; stosb\n" \
31118 "2: " ASM_CLAC "\n" \
31119+ __COPYUSER_RESTORE_ES \
31120 ".section .fixup,\"ax\"\n" \
31121 "3: lea 0(%2,%0,4),%0\n" \
31122 " jmp 2b\n" \
31123@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31124
31125 #ifdef CONFIG_X86_INTEL_USERCOPY
31126 static unsigned long
31127-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31128+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31129 {
31130 int d0, d1;
31131 __asm__ __volatile__(
31132@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31133 " .align 2,0x90\n"
31134 "3: movl 0(%4), %%eax\n"
31135 "4: movl 4(%4), %%edx\n"
31136- "5: movl %%eax, 0(%3)\n"
31137- "6: movl %%edx, 4(%3)\n"
31138+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31139+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31140 "7: movl 8(%4), %%eax\n"
31141 "8: movl 12(%4),%%edx\n"
31142- "9: movl %%eax, 8(%3)\n"
31143- "10: movl %%edx, 12(%3)\n"
31144+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31145+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31146 "11: movl 16(%4), %%eax\n"
31147 "12: movl 20(%4), %%edx\n"
31148- "13: movl %%eax, 16(%3)\n"
31149- "14: movl %%edx, 20(%3)\n"
31150+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31151+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31152 "15: movl 24(%4), %%eax\n"
31153 "16: movl 28(%4), %%edx\n"
31154- "17: movl %%eax, 24(%3)\n"
31155- "18: movl %%edx, 28(%3)\n"
31156+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31157+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31158 "19: movl 32(%4), %%eax\n"
31159 "20: movl 36(%4), %%edx\n"
31160- "21: movl %%eax, 32(%3)\n"
31161- "22: movl %%edx, 36(%3)\n"
31162+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31163+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31164 "23: movl 40(%4), %%eax\n"
31165 "24: movl 44(%4), %%edx\n"
31166- "25: movl %%eax, 40(%3)\n"
31167- "26: movl %%edx, 44(%3)\n"
31168+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31169+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31170 "27: movl 48(%4), %%eax\n"
31171 "28: movl 52(%4), %%edx\n"
31172- "29: movl %%eax, 48(%3)\n"
31173- "30: movl %%edx, 52(%3)\n"
31174+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31175+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31176 "31: movl 56(%4), %%eax\n"
31177 "32: movl 60(%4), %%edx\n"
31178- "33: movl %%eax, 56(%3)\n"
31179- "34: movl %%edx, 60(%3)\n"
31180+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31181+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31182 " addl $-64, %0\n"
31183 " addl $64, %4\n"
31184 " addl $64, %3\n"
31185@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31186 " shrl $2, %0\n"
31187 " andl $3, %%eax\n"
31188 " cld\n"
31189+ __COPYUSER_SET_ES
31190 "99: rep; movsl\n"
31191 "36: movl %%eax, %0\n"
31192 "37: rep; movsb\n"
31193 "100:\n"
31194+ __COPYUSER_RESTORE_ES
31195+ ".section .fixup,\"ax\"\n"
31196+ "101: lea 0(%%eax,%0,4),%0\n"
31197+ " jmp 100b\n"
31198+ ".previous\n"
31199+ _ASM_EXTABLE(1b,100b)
31200+ _ASM_EXTABLE(2b,100b)
31201+ _ASM_EXTABLE(3b,100b)
31202+ _ASM_EXTABLE(4b,100b)
31203+ _ASM_EXTABLE(5b,100b)
31204+ _ASM_EXTABLE(6b,100b)
31205+ _ASM_EXTABLE(7b,100b)
31206+ _ASM_EXTABLE(8b,100b)
31207+ _ASM_EXTABLE(9b,100b)
31208+ _ASM_EXTABLE(10b,100b)
31209+ _ASM_EXTABLE(11b,100b)
31210+ _ASM_EXTABLE(12b,100b)
31211+ _ASM_EXTABLE(13b,100b)
31212+ _ASM_EXTABLE(14b,100b)
31213+ _ASM_EXTABLE(15b,100b)
31214+ _ASM_EXTABLE(16b,100b)
31215+ _ASM_EXTABLE(17b,100b)
31216+ _ASM_EXTABLE(18b,100b)
31217+ _ASM_EXTABLE(19b,100b)
31218+ _ASM_EXTABLE(20b,100b)
31219+ _ASM_EXTABLE(21b,100b)
31220+ _ASM_EXTABLE(22b,100b)
31221+ _ASM_EXTABLE(23b,100b)
31222+ _ASM_EXTABLE(24b,100b)
31223+ _ASM_EXTABLE(25b,100b)
31224+ _ASM_EXTABLE(26b,100b)
31225+ _ASM_EXTABLE(27b,100b)
31226+ _ASM_EXTABLE(28b,100b)
31227+ _ASM_EXTABLE(29b,100b)
31228+ _ASM_EXTABLE(30b,100b)
31229+ _ASM_EXTABLE(31b,100b)
31230+ _ASM_EXTABLE(32b,100b)
31231+ _ASM_EXTABLE(33b,100b)
31232+ _ASM_EXTABLE(34b,100b)
31233+ _ASM_EXTABLE(35b,100b)
31234+ _ASM_EXTABLE(36b,100b)
31235+ _ASM_EXTABLE(37b,100b)
31236+ _ASM_EXTABLE(99b,101b)
31237+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31238+ : "1"(to), "2"(from), "0"(size)
31239+ : "eax", "edx", "memory");
31240+ return size;
31241+}
31242+
31243+static unsigned long
31244+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31245+{
31246+ int d0, d1;
31247+ __asm__ __volatile__(
31248+ " .align 2,0x90\n"
31249+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31250+ " cmpl $67, %0\n"
31251+ " jbe 3f\n"
31252+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31253+ " .align 2,0x90\n"
31254+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31255+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31256+ "5: movl %%eax, 0(%3)\n"
31257+ "6: movl %%edx, 4(%3)\n"
31258+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31259+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31260+ "9: movl %%eax, 8(%3)\n"
31261+ "10: movl %%edx, 12(%3)\n"
31262+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31263+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31264+ "13: movl %%eax, 16(%3)\n"
31265+ "14: movl %%edx, 20(%3)\n"
31266+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31267+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31268+ "17: movl %%eax, 24(%3)\n"
31269+ "18: movl %%edx, 28(%3)\n"
31270+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31271+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31272+ "21: movl %%eax, 32(%3)\n"
31273+ "22: movl %%edx, 36(%3)\n"
31274+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31275+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31276+ "25: movl %%eax, 40(%3)\n"
31277+ "26: movl %%edx, 44(%3)\n"
31278+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31279+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31280+ "29: movl %%eax, 48(%3)\n"
31281+ "30: movl %%edx, 52(%3)\n"
31282+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31283+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31284+ "33: movl %%eax, 56(%3)\n"
31285+ "34: movl %%edx, 60(%3)\n"
31286+ " addl $-64, %0\n"
31287+ " addl $64, %4\n"
31288+ " addl $64, %3\n"
31289+ " cmpl $63, %0\n"
31290+ " ja 1b\n"
31291+ "35: movl %0, %%eax\n"
31292+ " shrl $2, %0\n"
31293+ " andl $3, %%eax\n"
31294+ " cld\n"
31295+ "99: rep; "__copyuser_seg" movsl\n"
31296+ "36: movl %%eax, %0\n"
31297+ "37: rep; "__copyuser_seg" movsb\n"
31298+ "100:\n"
31299 ".section .fixup,\"ax\"\n"
31300 "101: lea 0(%%eax,%0,4),%0\n"
31301 " jmp 100b\n"
31302@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31303 int d0, d1;
31304 __asm__ __volatile__(
31305 " .align 2,0x90\n"
31306- "0: movl 32(%4), %%eax\n"
31307+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31308 " cmpl $67, %0\n"
31309 " jbe 2f\n"
31310- "1: movl 64(%4), %%eax\n"
31311+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31312 " .align 2,0x90\n"
31313- "2: movl 0(%4), %%eax\n"
31314- "21: movl 4(%4), %%edx\n"
31315+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31316+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31317 " movl %%eax, 0(%3)\n"
31318 " movl %%edx, 4(%3)\n"
31319- "3: movl 8(%4), %%eax\n"
31320- "31: movl 12(%4),%%edx\n"
31321+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31322+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31323 " movl %%eax, 8(%3)\n"
31324 " movl %%edx, 12(%3)\n"
31325- "4: movl 16(%4), %%eax\n"
31326- "41: movl 20(%4), %%edx\n"
31327+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31328+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31329 " movl %%eax, 16(%3)\n"
31330 " movl %%edx, 20(%3)\n"
31331- "10: movl 24(%4), %%eax\n"
31332- "51: movl 28(%4), %%edx\n"
31333+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31334+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31335 " movl %%eax, 24(%3)\n"
31336 " movl %%edx, 28(%3)\n"
31337- "11: movl 32(%4), %%eax\n"
31338- "61: movl 36(%4), %%edx\n"
31339+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31340+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31341 " movl %%eax, 32(%3)\n"
31342 " movl %%edx, 36(%3)\n"
31343- "12: movl 40(%4), %%eax\n"
31344- "71: movl 44(%4), %%edx\n"
31345+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31346+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31347 " movl %%eax, 40(%3)\n"
31348 " movl %%edx, 44(%3)\n"
31349- "13: movl 48(%4), %%eax\n"
31350- "81: movl 52(%4), %%edx\n"
31351+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31352+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31353 " movl %%eax, 48(%3)\n"
31354 " movl %%edx, 52(%3)\n"
31355- "14: movl 56(%4), %%eax\n"
31356- "91: movl 60(%4), %%edx\n"
31357+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31358+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31359 " movl %%eax, 56(%3)\n"
31360 " movl %%edx, 60(%3)\n"
31361 " addl $-64, %0\n"
31362@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31363 " shrl $2, %0\n"
31364 " andl $3, %%eax\n"
31365 " cld\n"
31366- "6: rep; movsl\n"
31367+ "6: rep; "__copyuser_seg" movsl\n"
31368 " movl %%eax,%0\n"
31369- "7: rep; movsb\n"
31370+ "7: rep; "__copyuser_seg" movsb\n"
31371 "8:\n"
31372 ".section .fixup,\"ax\"\n"
31373 "9: lea 0(%%eax,%0,4),%0\n"
31374@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31375
31376 __asm__ __volatile__(
31377 " .align 2,0x90\n"
31378- "0: movl 32(%4), %%eax\n"
31379+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31380 " cmpl $67, %0\n"
31381 " jbe 2f\n"
31382- "1: movl 64(%4), %%eax\n"
31383+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31384 " .align 2,0x90\n"
31385- "2: movl 0(%4), %%eax\n"
31386- "21: movl 4(%4), %%edx\n"
31387+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31388+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31389 " movnti %%eax, 0(%3)\n"
31390 " movnti %%edx, 4(%3)\n"
31391- "3: movl 8(%4), %%eax\n"
31392- "31: movl 12(%4),%%edx\n"
31393+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31394+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31395 " movnti %%eax, 8(%3)\n"
31396 " movnti %%edx, 12(%3)\n"
31397- "4: movl 16(%4), %%eax\n"
31398- "41: movl 20(%4), %%edx\n"
31399+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31400+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31401 " movnti %%eax, 16(%3)\n"
31402 " movnti %%edx, 20(%3)\n"
31403- "10: movl 24(%4), %%eax\n"
31404- "51: movl 28(%4), %%edx\n"
31405+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31406+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31407 " movnti %%eax, 24(%3)\n"
31408 " movnti %%edx, 28(%3)\n"
31409- "11: movl 32(%4), %%eax\n"
31410- "61: movl 36(%4), %%edx\n"
31411+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31412+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31413 " movnti %%eax, 32(%3)\n"
31414 " movnti %%edx, 36(%3)\n"
31415- "12: movl 40(%4), %%eax\n"
31416- "71: movl 44(%4), %%edx\n"
31417+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31418+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31419 " movnti %%eax, 40(%3)\n"
31420 " movnti %%edx, 44(%3)\n"
31421- "13: movl 48(%4), %%eax\n"
31422- "81: movl 52(%4), %%edx\n"
31423+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31424+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31425 " movnti %%eax, 48(%3)\n"
31426 " movnti %%edx, 52(%3)\n"
31427- "14: movl 56(%4), %%eax\n"
31428- "91: movl 60(%4), %%edx\n"
31429+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31430+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31431 " movnti %%eax, 56(%3)\n"
31432 " movnti %%edx, 60(%3)\n"
31433 " addl $-64, %0\n"
31434@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31435 " shrl $2, %0\n"
31436 " andl $3, %%eax\n"
31437 " cld\n"
31438- "6: rep; movsl\n"
31439+ "6: rep; "__copyuser_seg" movsl\n"
31440 " movl %%eax,%0\n"
31441- "7: rep; movsb\n"
31442+ "7: rep; "__copyuser_seg" movsb\n"
31443 "8:\n"
31444 ".section .fixup,\"ax\"\n"
31445 "9: lea 0(%%eax,%0,4),%0\n"
31446@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31447
31448 __asm__ __volatile__(
31449 " .align 2,0x90\n"
31450- "0: movl 32(%4), %%eax\n"
31451+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31452 " cmpl $67, %0\n"
31453 " jbe 2f\n"
31454- "1: movl 64(%4), %%eax\n"
31455+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31456 " .align 2,0x90\n"
31457- "2: movl 0(%4), %%eax\n"
31458- "21: movl 4(%4), %%edx\n"
31459+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31460+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31461 " movnti %%eax, 0(%3)\n"
31462 " movnti %%edx, 4(%3)\n"
31463- "3: movl 8(%4), %%eax\n"
31464- "31: movl 12(%4),%%edx\n"
31465+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31466+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31467 " movnti %%eax, 8(%3)\n"
31468 " movnti %%edx, 12(%3)\n"
31469- "4: movl 16(%4), %%eax\n"
31470- "41: movl 20(%4), %%edx\n"
31471+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31472+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31473 " movnti %%eax, 16(%3)\n"
31474 " movnti %%edx, 20(%3)\n"
31475- "10: movl 24(%4), %%eax\n"
31476- "51: movl 28(%4), %%edx\n"
31477+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31478+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31479 " movnti %%eax, 24(%3)\n"
31480 " movnti %%edx, 28(%3)\n"
31481- "11: movl 32(%4), %%eax\n"
31482- "61: movl 36(%4), %%edx\n"
31483+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31484+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31485 " movnti %%eax, 32(%3)\n"
31486 " movnti %%edx, 36(%3)\n"
31487- "12: movl 40(%4), %%eax\n"
31488- "71: movl 44(%4), %%edx\n"
31489+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31490+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31491 " movnti %%eax, 40(%3)\n"
31492 " movnti %%edx, 44(%3)\n"
31493- "13: movl 48(%4), %%eax\n"
31494- "81: movl 52(%4), %%edx\n"
31495+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31496+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31497 " movnti %%eax, 48(%3)\n"
31498 " movnti %%edx, 52(%3)\n"
31499- "14: movl 56(%4), %%eax\n"
31500- "91: movl 60(%4), %%edx\n"
31501+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31502+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31503 " movnti %%eax, 56(%3)\n"
31504 " movnti %%edx, 60(%3)\n"
31505 " addl $-64, %0\n"
31506@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31507 " shrl $2, %0\n"
31508 " andl $3, %%eax\n"
31509 " cld\n"
31510- "6: rep; movsl\n"
31511+ "6: rep; "__copyuser_seg" movsl\n"
31512 " movl %%eax,%0\n"
31513- "7: rep; movsb\n"
31514+ "7: rep; "__copyuser_seg" movsb\n"
31515 "8:\n"
31516 ".section .fixup,\"ax\"\n"
31517 "9: lea 0(%%eax,%0,4),%0\n"
31518@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31519 */
31520 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31521 unsigned long size);
31522-unsigned long __copy_user_intel(void __user *to, const void *from,
31523+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31524+ unsigned long size);
31525+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31526 unsigned long size);
31527 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31528 const void __user *from, unsigned long size);
31529 #endif /* CONFIG_X86_INTEL_USERCOPY */
31530
31531 /* Generic arbitrary sized copy. */
31532-#define __copy_user(to, from, size) \
31533+#define __copy_user(to, from, size, prefix, set, restore) \
31534 do { \
31535 int __d0, __d1, __d2; \
31536 __asm__ __volatile__( \
31537+ set \
31538 " cmp $7,%0\n" \
31539 " jbe 1f\n" \
31540 " movl %1,%0\n" \
31541 " negl %0\n" \
31542 " andl $7,%0\n" \
31543 " subl %0,%3\n" \
31544- "4: rep; movsb\n" \
31545+ "4: rep; "prefix"movsb\n" \
31546 " movl %3,%0\n" \
31547 " shrl $2,%0\n" \
31548 " andl $3,%3\n" \
31549 " .align 2,0x90\n" \
31550- "0: rep; movsl\n" \
31551+ "0: rep; "prefix"movsl\n" \
31552 " movl %3,%0\n" \
31553- "1: rep; movsb\n" \
31554+ "1: rep; "prefix"movsb\n" \
31555 "2:\n" \
31556+ restore \
31557 ".section .fixup,\"ax\"\n" \
31558 "5: addl %3,%0\n" \
31559 " jmp 2b\n" \
31560@@ -538,14 +650,14 @@ do { \
31561 " negl %0\n" \
31562 " andl $7,%0\n" \
31563 " subl %0,%3\n" \
31564- "4: rep; movsb\n" \
31565+ "4: rep; "__copyuser_seg"movsb\n" \
31566 " movl %3,%0\n" \
31567 " shrl $2,%0\n" \
31568 " andl $3,%3\n" \
31569 " .align 2,0x90\n" \
31570- "0: rep; movsl\n" \
31571+ "0: rep; "__copyuser_seg"movsl\n" \
31572 " movl %3,%0\n" \
31573- "1: rep; movsb\n" \
31574+ "1: rep; "__copyuser_seg"movsb\n" \
31575 "2:\n" \
31576 ".section .fixup,\"ax\"\n" \
31577 "5: addl %3,%0\n" \
31578@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31579 {
31580 stac();
31581 if (movsl_is_ok(to, from, n))
31582- __copy_user(to, from, n);
31583+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31584 else
31585- n = __copy_user_intel(to, from, n);
31586+ n = __generic_copy_to_user_intel(to, from, n);
31587 clac();
31588 return n;
31589 }
31590@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31591 {
31592 stac();
31593 if (movsl_is_ok(to, from, n))
31594- __copy_user(to, from, n);
31595+ __copy_user(to, from, n, __copyuser_seg, "", "");
31596 else
31597- n = __copy_user_intel((void __user *)to,
31598- (const void *)from, n);
31599+ n = __generic_copy_from_user_intel(to, from, n);
31600 clac();
31601 return n;
31602 }
31603@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31604 if (n > 64 && cpu_has_xmm2)
31605 n = __copy_user_intel_nocache(to, from, n);
31606 else
31607- __copy_user(to, from, n);
31608+ __copy_user(to, from, n, __copyuser_seg, "", "");
31609 #else
31610- __copy_user(to, from, n);
31611+ __copy_user(to, from, n, __copyuser_seg, "", "");
31612 #endif
31613 clac();
31614 return n;
31615 }
31616 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31617
31618-/**
31619- * copy_to_user: - Copy a block of data into user space.
31620- * @to: Destination address, in user space.
31621- * @from: Source address, in kernel space.
31622- * @n: Number of bytes to copy.
31623- *
31624- * Context: User context only. This function may sleep.
31625- *
31626- * Copy data from kernel space to user space.
31627- *
31628- * Returns number of bytes that could not be copied.
31629- * On success, this will be zero.
31630- */
31631-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31632+#ifdef CONFIG_PAX_MEMORY_UDEREF
31633+void __set_fs(mm_segment_t x)
31634 {
31635- if (access_ok(VERIFY_WRITE, to, n))
31636- n = __copy_to_user(to, from, n);
31637- return n;
31638+ switch (x.seg) {
31639+ case 0:
31640+ loadsegment(gs, 0);
31641+ break;
31642+ case TASK_SIZE_MAX:
31643+ loadsegment(gs, __USER_DS);
31644+ break;
31645+ case -1UL:
31646+ loadsegment(gs, __KERNEL_DS);
31647+ break;
31648+ default:
31649+ BUG();
31650+ }
31651 }
31652-EXPORT_SYMBOL(_copy_to_user);
31653+EXPORT_SYMBOL(__set_fs);
31654
31655-/**
31656- * copy_from_user: - Copy a block of data from user space.
31657- * @to: Destination address, in kernel space.
31658- * @from: Source address, in user space.
31659- * @n: Number of bytes to copy.
31660- *
31661- * Context: User context only. This function may sleep.
31662- *
31663- * Copy data from user space to kernel space.
31664- *
31665- * Returns number of bytes that could not be copied.
31666- * On success, this will be zero.
31667- *
31668- * If some data could not be copied, this function will pad the copied
31669- * data to the requested size using zero bytes.
31670- */
31671-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31672+void set_fs(mm_segment_t x)
31673 {
31674- if (access_ok(VERIFY_READ, from, n))
31675- n = __copy_from_user(to, from, n);
31676- else
31677- memset(to, 0, n);
31678- return n;
31679+ current_thread_info()->addr_limit = x;
31680+ __set_fs(x);
31681 }
31682-EXPORT_SYMBOL(_copy_from_user);
31683+EXPORT_SYMBOL(set_fs);
31684+#endif
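
The usercopy_32.c changes look repetitive but encode one architectural asymmetry: a segment override prefix on the x86 string instructions affects only the DS:ESI source, while the ES:EDI destination segment is fixed. The copy-from-user and zeroing paths can therefore just prefix their user-side accesses with "gs;" (__copyuser_seg), whereas the copy-to-user paths must temporarily load %es themselves (__COPYUSER_SET_ES / __COPYUSER_RESTORE_ES); __copy_user() grows prefix/set/restore parameters so each caller picks the right variant, and the Intel fast path is split into __generic_copy_to_user_intel and __generic_copy_from_user_intel along the same line. The new __set_fs() at the end of the file completes the picture by mirroring addr_limit into %gs: a null selector for a zero limit, __USER_DS for TASK_SIZE_MAX, __KERNEL_DS for the kernel limit. An i386-oriented sketch of the from-user fast path ("rep; gs; movsl" matches the patched string literally):

    /* With UDEREF, %gs selects a segment sized to the current
     * addr_limit, so a kernel address smuggled in as a user pointer
     * faults at the segment limit instead of being dereferenced.
     */
    static inline void copy_dwords_from_user_sketch(void *to, const void *from,
                                                    unsigned long dwords)
    {
            asm volatile("rep; gs; movsl"
                         : "+D" (to), "+S" (from), "+c" (dwords)
                         : : "memory");
    }

The kernel-doc bodies of _copy_to_user()/_copy_from_user() are dropped here; the entry points are presumably provided elsewhere in the patch.
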
31685diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31686index c905e89..01ab928 100644
31687--- a/arch/x86/lib/usercopy_64.c
31688+++ b/arch/x86/lib/usercopy_64.c
31689@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31690 might_fault();
31691 /* no memory constraint because it doesn't change any memory gcc knows
31692 about */
31693+ pax_open_userland();
31694 stac();
31695 asm volatile(
31696 " testq %[size8],%[size8]\n"
31697@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31698 _ASM_EXTABLE(0b,3b)
31699 _ASM_EXTABLE(1b,2b)
31700 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31701- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31702+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31703 [zero] "r" (0UL), [eight] "r" (8UL));
31704 clac();
31705+ pax_close_userland();
31706 return size;
31707 }
31708 EXPORT_SYMBOL(__clear_user);
31709@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31710 }
31711 EXPORT_SYMBOL(clear_user);
31712
31713-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31714+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31715 {
31716- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31717- return copy_user_generic((__force void *)to, (__force void *)from, len);
31718- }
31719- return len;
31720+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31721+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31722+ return len;
31723 }
31724 EXPORT_SYMBOL(copy_in_user);
31725
31726@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31727 * it is not necessary to optimize tail handling.
31728 */
31729 __visible unsigned long
31730-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31731+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31732 {
31733 char c;
31734 unsigned zero_len;
31735
31736+ clac();
31737+ pax_close_userland();
31738 for (; len; --len, to++) {
31739 if (__get_user_nocheck(c, from++, sizeof(char)))
31740 break;
31741@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31742 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31743 if (__put_user_nocheck(c, to++, sizeof(char)))
31744 break;
31745- clac();
31746 return len;
31747 }
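
On the 64-bit side the same bracketing shows up in C: pax_open_userland()/pax_close_userland() (macros supplied elsewhere in this patch) wrap the existing stac()/clac() SMAP window, ____m() is by all appearances the C-level counterpart of the shadow-base remap done in the asm stubs, and copy_user_handle_tail() now begins by closing the window it inherits from the faulting fast path, with the subsequent __get_user_nocheck()/__put_user_nocheck() calls presumably reopening it per access. The nesting order the hunks establish:

    /* Self-contained sketch; all four helpers are stand-ins for the
     * patched kernel macros.  The UDEREF window opens first and
     * closes last, enclosing the SMAP window.
     */
    #define pax_open_userland()   do { } while (0)
    #define pax_close_userland()  do { } while (0)
    #define stac()                do { } while (0)
    #define clac()                do { } while (0)

    static unsigned long guarded_user_op(unsigned long (*op)(void))
    {
            unsigned long ret;

            pax_open_userland();
            stac();
            ret = op();
            clac();
            pax_close_userland();
            return ret;
    }
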
31748diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31749index ecfdc46..55b9309 100644
31750--- a/arch/x86/mm/Makefile
31751+++ b/arch/x86/mm/Makefile
31752@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31753 obj-$(CONFIG_MEMTEST) += memtest.o
31754
31755 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31756+
31757+quote:="
31758+obj-$(CONFIG_X86_64) += uderef_64.o
31759+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
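
A note on the Makefile idiom: Kconfig string values arrive with literal double quotes, so quote:=" defines a variable holding a single quote character, and $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) strips the quotes before the result becomes the per-object CFLAGS for the new uderef_64.o.
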
31760diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31761index 903ec1e..c4166b2 100644
31762--- a/arch/x86/mm/extable.c
31763+++ b/arch/x86/mm/extable.c
31764@@ -6,12 +6,24 @@
31765 static inline unsigned long
31766 ex_insn_addr(const struct exception_table_entry *x)
31767 {
31768- return (unsigned long)&x->insn + x->insn;
31769+ unsigned long reloc = 0;
31770+
31771+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31772+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31773+#endif
31774+
31775+ return (unsigned long)&x->insn + x->insn + reloc;
31776 }
31777 static inline unsigned long
31778 ex_fixup_addr(const struct exception_table_entry *x)
31779 {
31780- return (unsigned long)&x->fixup + x->fixup;
31781+ unsigned long reloc = 0;
31782+
31783+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31784+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31785+#endif
31786+
31787+ return (unsigned long)&x->fixup + x->fixup + reloc;
31788 }
31789
31790 int fixup_exception(struct pt_regs *regs)
31791@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31792 unsigned long new_ip;
31793
31794 #ifdef CONFIG_PNPBIOS
31795- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31796+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31797 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31798 extern u32 pnp_bios_is_utter_crap;
31799 pnp_bios_is_utter_crap = 1;
31800@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31801 i += 4;
31802 p->fixup -= i;
31803 i += 4;
31804+
31805+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31806+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31807+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31808+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31809+#endif
31810+
31811 }
31812 }
31813
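
The extable.c changes keep the relative exception table self-consistent when i386 KERNEXEC shifts the kernel by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR: the runtime decoders add the delta, so the build-time sorter pre-subtracts it from the stored offsets, and the BUILD_BUG_ON pins down that sorting happens at build time, where that compensation is valid. A round-trip sketch of the invariant (0x100000 is an arbitrary stand-in for the shift, and the model elides sort_extable()'s intermediate offset juggling):

    #include <assert.h>
    #include <stdint.h>

    static int32_t store_offset(uintptr_t field, uintptr_t target, intptr_t reloc)
    {
            return (int32_t)(target - field - reloc);   /* build-time side */
    }

    static uintptr_t load_addr(uintptr_t field, int32_t offset, intptr_t reloc)
    {
            return field + (intptr_t)offset + reloc;    /* ex_*_addr() side */
    }

    int main(void)
    {
            uintptr_t field = 0x1000, target = 0x2000;
            intptr_t reloc = 0x100000;

            assert(load_addr(field, store_offset(field, target, reloc), reloc)
                   == target);
            return 0;
    }
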
31814diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31815index e3ff27a..f38f7c0 100644
31816--- a/arch/x86/mm/fault.c
31817+++ b/arch/x86/mm/fault.c
31818@@ -13,12 +13,19 @@
31819 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31820 #include <linux/prefetch.h> /* prefetchw */
31821 #include <linux/context_tracking.h> /* exception_enter(), ... */
31822+#include <linux/unistd.h>
31823+#include <linux/compiler.h>
31824
31825 #include <asm/traps.h> /* dotraplinkage, ... */
31826 #include <asm/pgalloc.h> /* pgd_*(), ... */
31827 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31828 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31829 #include <asm/vsyscall.h> /* emulate_vsyscall */
31830+#include <asm/tlbflush.h>
31831+
31832+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31833+#include <asm/stacktrace.h>
31834+#endif
31835
31836 #define CREATE_TRACE_POINTS
31837 #include <asm/trace/exceptions.h>
31838@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31839 int ret = 0;
31840
31841 /* kprobe_running() needs smp_processor_id() */
31842- if (kprobes_built_in() && !user_mode_vm(regs)) {
31843+ if (kprobes_built_in() && !user_mode(regs)) {
31844 preempt_disable();
31845 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31846 ret = 1;
31847@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31848 return !instr_lo || (instr_lo>>1) == 1;
31849 case 0x00:
31850 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31851- if (probe_kernel_address(instr, opcode))
31852+ if (user_mode(regs)) {
31853+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31854+ return 0;
31855+ } else if (probe_kernel_address(instr, opcode))
31856 return 0;
31857
31858 *prefetch = (instr_lo == 0xF) &&
31859@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31860 while (instr < max_instr) {
31861 unsigned char opcode;
31862
31863- if (probe_kernel_address(instr, opcode))
31864+ if (user_mode(regs)) {
31865+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31866+ break;
31867+ } else if (probe_kernel_address(instr, opcode))
31868 break;
31869
31870 instr++;
31871@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31872 force_sig_info(si_signo, &info, tsk);
31873 }
31874
31875+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31876+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31877+#endif
31878+
31879+#ifdef CONFIG_PAX_EMUTRAMP
31880+static int pax_handle_fetch_fault(struct pt_regs *regs);
31881+#endif
31882+
31883+#ifdef CONFIG_PAX_PAGEEXEC
31884+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31885+{
31886+ pgd_t *pgd;
31887+ pud_t *pud;
31888+ pmd_t *pmd;
31889+
31890+ pgd = pgd_offset(mm, address);
31891+ if (!pgd_present(*pgd))
31892+ return NULL;
31893+ pud = pud_offset(pgd, address);
31894+ if (!pud_present(*pud))
31895+ return NULL;
31896+ pmd = pmd_offset(pud, address);
31897+ if (!pmd_present(*pmd))
31898+ return NULL;
31899+ return pmd;
31900+}
31901+#endif
31902+
31903 DEFINE_SPINLOCK(pgd_lock);
31904 LIST_HEAD(pgd_list);
31905
31906@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31907 for (address = VMALLOC_START & PMD_MASK;
31908 address >= TASK_SIZE && address < FIXADDR_TOP;
31909 address += PMD_SIZE) {
31910+
31911+#ifdef CONFIG_PAX_PER_CPU_PGD
31912+ unsigned long cpu;
31913+#else
31914 struct page *page;
31915+#endif
31916
31917 spin_lock(&pgd_lock);
31918+
31919+#ifdef CONFIG_PAX_PER_CPU_PGD
31920+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31921+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31922+ pmd_t *ret;
31923+
31924+ ret = vmalloc_sync_one(pgd, address);
31925+ if (!ret)
31926+ break;
31927+ pgd = get_cpu_pgd(cpu, kernel);
31928+#else
31929 list_for_each_entry(page, &pgd_list, lru) {
31930+ pgd_t *pgd;
31931 spinlock_t *pgt_lock;
31932 pmd_t *ret;
31933
31934@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31935 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31936
31937 spin_lock(pgt_lock);
31938- ret = vmalloc_sync_one(page_address(page), address);
31939+ pgd = page_address(page);
31940+#endif
31941+
31942+ ret = vmalloc_sync_one(pgd, address);
31943+
31944+#ifndef CONFIG_PAX_PER_CPU_PGD
31945 spin_unlock(pgt_lock);
31946+#endif
31947
31948 if (!ret)
31949 break;
31950@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31951 * an interrupt in the middle of a task switch..
31952 */
31953 pgd_paddr = read_cr3();
31954+
31955+#ifdef CONFIG_PAX_PER_CPU_PGD
31956+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31957+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31958+#endif
31959+
31960 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31961 if (!pmd_k)
31962 return -1;
31963@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31964 * happen within a race in page table update. In the later
31965 * case just flush:
31966 */
31967- pgd = pgd_offset(current->active_mm, address);
31968+
31969 pgd_ref = pgd_offset_k(address);
31970 if (pgd_none(*pgd_ref))
31971 return -1;
31972
31973+#ifdef CONFIG_PAX_PER_CPU_PGD
31974+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31975+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31976+ if (pgd_none(*pgd)) {
31977+ set_pgd(pgd, *pgd_ref);
31978+ arch_flush_lazy_mmu_mode();
31979+ } else {
31980+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31981+ }
31982+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31983+#else
31984+ pgd = pgd_offset(current->active_mm, address);
31985+#endif
31986+
31987 if (pgd_none(*pgd)) {
31988 set_pgd(pgd, *pgd_ref);
31989 arch_flush_lazy_mmu_mode();
31990@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31991 static int is_errata100(struct pt_regs *regs, unsigned long address)
31992 {
31993 #ifdef CONFIG_X86_64
31994- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31995+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31996 return 1;
31997 #endif
31998 return 0;
31999@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32000 }
32001
32002 static const char nx_warning[] = KERN_CRIT
32003-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32004+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32005 static const char smep_warning[] = KERN_CRIT
32006-"unable to execute userspace code (SMEP?) (uid: %d)\n";
32007+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32008
32009 static void
32010 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32011@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32012 if (!oops_may_print())
32013 return;
32014
32015- if (error_code & PF_INSTR) {
32016+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32017 unsigned int level;
32018 pgd_t *pgd;
32019 pte_t *pte;
32020@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32021 pte = lookup_address_in_pgd(pgd, address, &level);
32022
32023 if (pte && pte_present(*pte) && !pte_exec(*pte))
32024- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32025+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32026 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32027 (pgd_flags(*pgd) & _PAGE_USER) &&
32028 (read_cr4() & X86_CR4_SMEP))
32029- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32030+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32031 }
32032
32033+#ifdef CONFIG_PAX_KERNEXEC
32034+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32035+ if (current->signal->curr_ip)
32036+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32037+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32038+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32039+ else
32040+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32041+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32042+ }
32043+#endif
32044+
32045 printk(KERN_ALERT "BUG: unable to handle kernel ");
32046 if (address < PAGE_SIZE)
32047 printk(KERN_CONT "NULL pointer dereference");
32048@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32049 return;
32050 }
32051 #endif
32052+
32053+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32054+ if (pax_is_fetch_fault(regs, error_code, address)) {
32055+
32056+#ifdef CONFIG_PAX_EMUTRAMP
32057+ switch (pax_handle_fetch_fault(regs)) {
32058+ case 2:
32059+ return;
32060+ }
32061+#endif
32062+
32063+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32064+ do_group_exit(SIGKILL);
32065+ }
32066+#endif
32067+
32068 /* Kernel addresses are always protection faults: */
32069 if (address >= TASK_SIZE)
32070 error_code |= PF_PROT;
32071@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32072 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32073 printk(KERN_ERR
32074 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32075- tsk->comm, tsk->pid, address);
32076+ tsk->comm, task_pid_nr(tsk), address);
32077 code = BUS_MCEERR_AR;
32078 }
32079 #endif
32080@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32081 return 1;
32082 }
32083
32084+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32085+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32086+{
32087+ pte_t *pte;
32088+ pmd_t *pmd;
32089+ spinlock_t *ptl;
32090+ unsigned char pte_mask;
32091+
32092+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32093+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32094+ return 0;
32095+
32096+ /* PaX: it's our fault, let's handle it if we can */
32097+
32098+ /* PaX: take a look at read faults before acquiring any locks */
32099+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32100+ /* instruction fetch attempt from a protected page in user mode */
32101+ up_read(&mm->mmap_sem);
32102+
32103+#ifdef CONFIG_PAX_EMUTRAMP
32104+ switch (pax_handle_fetch_fault(regs)) {
32105+ case 2:
32106+ return 1;
32107+ }
32108+#endif
32109+
32110+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32111+ do_group_exit(SIGKILL);
32112+ }
32113+
32114+ pmd = pax_get_pmd(mm, address);
32115+ if (unlikely(!pmd))
32116+ return 0;
32117+
32118+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32119+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32120+ pte_unmap_unlock(pte, ptl);
32121+ return 0;
32122+ }
32123+
32124+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32125+ /* write attempt to a protected page in user mode */
32126+ pte_unmap_unlock(pte, ptl);
32127+ return 0;
32128+ }
32129+
32130+#ifdef CONFIG_SMP
32131+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32132+#else
32133+ if (likely(address > get_limit(regs->cs)))
32134+#endif
32135+ {
32136+ set_pte(pte, pte_mkread(*pte));
32137+ __flush_tlb_one(address);
32138+ pte_unmap_unlock(pte, ptl);
32139+ up_read(&mm->mmap_sem);
32140+ return 1;
32141+ }
32142+
32143+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32144+
32145+ /*
32146+ * PaX: fill DTLB with user rights and retry
32147+ */
32148+ __asm__ __volatile__ (
32149+ "orb %2,(%1)\n"
32150+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32151+/*
32152+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32153+ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
32154+ * page fault when examined during a TLB load attempt. This is true not only
32155+ * for PTEs holding a non-present entry but also for present entries that will
32156+ * raise a page fault (such as those set up by PaX, or by the copy-on-write
32157+ * mechanism). In effect it means that we do *not* need to flush the TLBs
32158+ * for our target pages since their PTEs are simply not in the TLBs at all.
32159+ *
32160+ * The best thing about omitting it is that we gain around 15-20% speed in the
32161+ * fast path of the page fault handler and can get rid of tracing since we
32162+ * can no longer flush unintended entries.
32163+ */
32164+ "invlpg (%0)\n"
32165+#endif
32166+ __copyuser_seg"testb $0,(%0)\n"
32167+ "xorb %3,(%1)\n"
32168+ :
32169+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32170+ : "memory", "cc");
32171+ pte_unmap_unlock(pte, ptl);
32172+ up_read(&mm->mmap_sem);
32173+ return 1;
32174+}
32175+#endif
32176+
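
The asm sequence above is the core of non-NX PAGEEXEC on 32-bit. As a reading aid, here is a hedged C restatement; sketch_dtlb_user_fill is an invented name, the real code must keep the whole sequence in one asm block so nothing can observe the widened PTE, and under UDEREF the data touch carries the __copyuser_seg segment prefix:

static void sketch_dtlb_user_fill(pte_t *pte, unsigned long address,
				  unsigned char pte_mask)
{
	/* 1. widen the PTE: set _PAGE_USER plus accessed/dirty bits */
	set_pte(pte, __pte(pte_val(*pte) | pte_mask));
	/* 2. data touch: only the DTLB caches the user-accessible entry */
	(void)*(volatile unsigned char *)address;
	/* 3. narrow the PTE again; the stale DTLB entry keeps serving data
	 *    accesses while instruction fetches still fault -- PAGEEXEC holds */
	set_pte(pte, __pte(pte_val(*pte) ^ _PAGE_USER));
}
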
32177 /*
32178 * Handle a spurious fault caused by a stale TLB entry.
32179 *
32180@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32181 static inline int
32182 access_error(unsigned long error_code, struct vm_area_struct *vma)
32183 {
32184+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32185+ return 1;
32186+
32187 if (error_code & PF_WRITE) {
32188 /* write, present and write, not present: */
32189 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32190@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32191 if (error_code & PF_USER)
32192 return false;
32193
32194- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32195+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32196 return false;
32197
32198 return true;
32199@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32200 tsk = current;
32201 mm = tsk->mm;
32202
32203+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32204+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32205+ if (!search_exception_tables(regs->ip)) {
32206+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32207+ bad_area_nosemaphore(regs, error_code, address);
32208+ return;
32209+ }
32210+ if (address < pax_user_shadow_base) {
32211+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32212+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32213+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32214+ } else
32215+ address -= pax_user_shadow_base;
32216+ }
32217+#endif
32218+
32219 /*
32220 * Detect and handle instructions that would cause a page fault for
32221 * both a tracked kernel page and a userspace page.
32222@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32223 * User-mode registers count as a user access even for any
32224 * potential system fault or CPU buglet:
32225 */
32226- if (user_mode_vm(regs)) {
32227+ if (user_mode(regs)) {
32228 local_irq_enable();
32229 error_code |= PF_USER;
32230 flags |= FAULT_FLAG_USER;
32231@@ -1187,6 +1411,11 @@ retry:
32232 might_sleep();
32233 }
32234
32235+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32236+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32237+ return;
32238+#endif
32239+
32240 vma = find_vma(mm, address);
32241 if (unlikely(!vma)) {
32242 bad_area(regs, error_code, address);
32243@@ -1198,18 +1427,24 @@ retry:
32244 bad_area(regs, error_code, address);
32245 return;
32246 }
32247- if (error_code & PF_USER) {
32248- /*
32249- * Accessing the stack below %sp is always a bug.
32250- * The large cushion allows instructions like enter
32251- * and pusha to work. ("enter $65535, $31" pushes
32252- * 32 pointers and then decrements %sp by 65535.)
32253- */
32254- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32255- bad_area(regs, error_code, address);
32256- return;
32257- }
32258+ /*
32259+ * Accessing the stack below %sp is always a bug.
32260+ * The large cushion allows instructions like enter
32261+ * and pusha to work. ("enter $65535, $31" pushes
32262+ * 32 pointers and then decrements %sp by 65535.)
32263+ */
32264+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32265+ bad_area(regs, error_code, address);
32266+ return;
32267 }
32268+
32269+#ifdef CONFIG_PAX_SEGMEXEC
32270+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32271+ bad_area(regs, error_code, address);
32272+ return;
32273+ }
32274+#endif
32275+
32276 if (unlikely(expand_stack(vma, address))) {
32277 bad_area(regs, error_code, address);
32278 return;
32279@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32280 }
32281 NOKPROBE_SYMBOL(trace_do_page_fault);
32282 #endif /* CONFIG_TRACING */
32283+
32284+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32285+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32286+{
32287+ struct mm_struct *mm = current->mm;
32288+ unsigned long ip = regs->ip;
32289+
32290+ if (v8086_mode(regs))
32291+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32292+
32293+#ifdef CONFIG_PAX_PAGEEXEC
32294+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32295+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32296+ return true;
32297+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32298+ return true;
32299+ return false;
32300+ }
32301+#endif
32302+
32303+#ifdef CONFIG_PAX_SEGMEXEC
32304+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32305+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32306+ return true;
32307+ return false;
32308+ }
32309+#endif
32310+
32311+ return false;
32312+}
32313+#endif
32314+
32315+#ifdef CONFIG_PAX_EMUTRAMP
32316+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32317+{
32318+ int err;
32319+
32320+ do { /* PaX: libffi trampoline emulation */
32321+ unsigned char mov, jmp;
32322+ unsigned int addr1, addr2;
32323+
32324+#ifdef CONFIG_X86_64
32325+ if ((regs->ip + 9) >> 32)
32326+ break;
32327+#endif
32328+
32329+ err = get_user(mov, (unsigned char __user *)regs->ip);
32330+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32331+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32332+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32333+
32334+ if (err)
32335+ break;
32336+
32337+ if (mov == 0xB8 && jmp == 0xE9) {
32338+ regs->ax = addr1;
32339+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32340+ return 2;
32341+ }
32342+ } while (0);
32343+
32344+ do { /* PaX: gcc trampoline emulation #1 */
32345+ unsigned char mov1, mov2;
32346+ unsigned short jmp;
32347+ unsigned int addr1, addr2;
32348+
32349+#ifdef CONFIG_X86_64
32350+ if ((regs->ip + 11) >> 32)
32351+ break;
32352+#endif
32353+
32354+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32355+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32356+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32357+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32358+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32359+
32360+ if (err)
32361+ break;
32362+
32363+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32364+ regs->cx = addr1;
32365+ regs->ax = addr2;
32366+ regs->ip = addr2;
32367+ return 2;
32368+ }
32369+ } while (0);
32370+
32371+ do { /* PaX: gcc trampoline emulation #2 */
32372+ unsigned char mov, jmp;
32373+ unsigned int addr1, addr2;
32374+
32375+#ifdef CONFIG_X86_64
32376+ if ((regs->ip + 9) >> 32)
32377+ break;
32378+#endif
32379+
32380+ err = get_user(mov, (unsigned char __user *)regs->ip);
32381+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32382+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32383+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32384+
32385+ if (err)
32386+ break;
32387+
32388+ if (mov == 0xB9 && jmp == 0xE9) {
32389+ regs->cx = addr1;
32390+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32391+ return 2;
32392+ }
32393+ } while (0);
32394+
32395+ return 1; /* PaX in action */
32396+}
32397+
32398+#ifdef CONFIG_X86_64
32399+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32400+{
32401+ int err;
32402+
32403+ do { /* PaX: libffi trampoline emulation */
32404+ unsigned short mov1, mov2, jmp1;
32405+ unsigned char stcclc, jmp2;
32406+ unsigned long addr1, addr2;
32407+
32408+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32409+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32410+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32411+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32412+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32413+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32414+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32415+
32416+ if (err)
32417+ break;
32418+
32419+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32420+ regs->r11 = addr1;
32421+ regs->r10 = addr2;
32422+ if (stcclc == 0xF8)
32423+ regs->flags &= ~X86_EFLAGS_CF;
32424+ else
32425+ regs->flags |= X86_EFLAGS_CF;
32426+ regs->ip = addr1;
32427+ return 2;
32428+ }
32429+ } while (0);
32430+
32431+ do { /* PaX: gcc trampoline emulation #1 */
32432+ unsigned short mov1, mov2, jmp1;
32433+ unsigned char jmp2;
32434+ unsigned int addr1;
32435+ unsigned long addr2;
32436+
32437+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32438+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32439+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32440+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32441+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32442+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32443+
32444+ if (err)
32445+ break;
32446+
32447+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32448+ regs->r11 = addr1;
32449+ regs->r10 = addr2;
32450+ regs->ip = addr1;
32451+ return 2;
32452+ }
32453+ } while (0);
32454+
32455+ do { /* PaX: gcc trampoline emulation #2 */
32456+ unsigned short mov1, mov2, jmp1;
32457+ unsigned char jmp2;
32458+ unsigned long addr1, addr2;
32459+
32460+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32461+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32462+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32463+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32464+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32465+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32466+
32467+ if (err)
32468+ break;
32469+
32470+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32471+ regs->r11 = addr1;
32472+ regs->r10 = addr2;
32473+ regs->ip = addr1;
32474+ return 2;
32475+ }
32476+ } while (0);
32477+
32478+ return 1; /* PaX in action */
32479+}
32480+#endif
32481+
32482+/*
32483+ * PaX: decide what to do with offenders (regs->ip = fault address)
32484+ *
32485+ * returns 1 when the task should be killed
32486+ * 2 when a gcc trampoline was detected
32487+ */
32488+static int pax_handle_fetch_fault(struct pt_regs *regs)
32489+{
32490+ if (v8086_mode(regs))
32491+ return 1;
32492+
32493+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32494+ return 1;
32495+
32496+#ifdef CONFIG_X86_32
32497+ return pax_handle_fetch_fault_32(regs);
32498+#else
32499+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32500+ return pax_handle_fetch_fault_32(regs);
32501+ else
32502+ return pax_handle_fetch_fault_64(regs);
32503+#endif
32504+}
32505+#endif
32506+
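The byte patterns matched by the emulators above are standard x86 encodings: 0xB8/0xB9 are mov $imm32 into %eax/%ecx, 0xE9 is jmp rel32, the bytes 0xFF 0xE0 (read as the little-endian short 0xE0FF) are jmp *%eax, and the 0x49 REX-prefixed 64-bit variants load %r11/%r10 before jmp *%r11. A hypothetical userspace demo of why the emulation exists: with GNU C, taking the address of a nested function makes gcc build exactly such a trampoline on the stack, which PAGEEXEC/SEGMEXEC would otherwise fault on and kill:

#include <stdio.h>

static void run(void (*cb)(void)) { cb(); }

int main(void)
{
	int hits = 0;
	void bump(void) { hits++; }	/* nested function, GNU extension */

	run(bump);			/* &bump points into an on-stack trampoline */
	printf("hits = %d\n", hits);	/* prints 1 */
	return 0;
}

Build with gcc (clang lacks nested functions); the toolchain then requests an executable stack via PT_GNU_STACK, which is precisely the hole EMUTRAMP closes by emulating the known trampolines instead of leaving the stack executable.
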
32507+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32508+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32509+{
32510+ long i;
32511+
32512+ printk(KERN_ERR "PAX: bytes at PC: ");
32513+ for (i = 0; i < 20; i++) {
32514+ unsigned char c;
32515+ if (get_user(c, (unsigned char __force_user *)pc+i))
32516+ printk(KERN_CONT "?? ");
32517+ else
32518+ printk(KERN_CONT "%02x ", c);
32519+ }
32520+ printk("\n");
32521+
32522+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32523+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32524+ unsigned long c;
32525+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32526+#ifdef CONFIG_X86_32
32527+ printk(KERN_CONT "???????? ");
32528+#else
32529+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32530+ printk(KERN_CONT "???????? ???????? ");
32531+ else
32532+ printk(KERN_CONT "???????????????? ");
32533+#endif
32534+ } else {
32535+#ifdef CONFIG_X86_64
32536+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32537+ printk(KERN_CONT "%08x ", (unsigned int)c);
32538+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32539+ } else
32540+#endif
32541+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32542+ }
32543+ }
32544+ printk("\n");
32545+}
32546+#endif
32547+
32548+/**
32549+ * probe_kernel_write(): safely attempt to write to a location
32550+ * @dst: address to write to
32551+ * @src: pointer to the data that shall be written
32552+ * @size: size of the data chunk
32553+ *
32554+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32555+ * happens, handle that and return -EFAULT.
32556+ */
32557+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32558+{
32559+ long ret;
32560+ mm_segment_t old_fs = get_fs();
32561+
32562+ set_fs(KERNEL_DS);
32563+ pagefault_disable();
32564+ pax_open_kernel();
32565+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32566+ pax_close_kernel();
32567+ pagefault_enable();
32568+ set_fs(old_fs);
32569+
32570+ return ret ? -EFAULT : 0;
32571+}
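A hedged, module-style usage sketch for the helper above (the destination here is ordinary data, purely for illustration): the pax_open_kernel()/pax_close_kernel() bracket lets the copy go through read-only kernel mappings, and pagefault_disable() plus the exception table turn a faulting destination into -EFAULT instead of an oops.

static int __init probe_write_demo(void)
{
	static unsigned char scratch;
	unsigned char val = 0x90;
	long err;

	err = probe_kernel_write(&scratch, &val, sizeof(val));
	if (err)
		pr_err("probe_kernel_write: %ld\n", err);
	return 0;
}
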
32572diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32573index 224b142..c2c9423 100644
32574--- a/arch/x86/mm/gup.c
32575+++ b/arch/x86/mm/gup.c
32576@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32577 addr = start;
32578 len = (unsigned long) nr_pages << PAGE_SHIFT;
32579 end = start + len;
32580- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32581+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32582 (void __user *)start, len)))
32583 return 0;
32584
32585@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32586 goto slow_irqon;
32587 #endif
32588
32589+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32590+ (void __user *)start, len)))
32591+ return 0;
32592+
32593 /*
32594 * XXX: batch / limit 'nr', to avoid large irq off latency
32595 * needs some instrumenting to determine the common sizes used by
32596diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32597index 4500142..53a363c 100644
32598--- a/arch/x86/mm/highmem_32.c
32599+++ b/arch/x86/mm/highmem_32.c
32600@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32601 idx = type + KM_TYPE_NR*smp_processor_id();
32602 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32603 BUG_ON(!pte_none(*(kmap_pte-idx)));
32604+
32605+ pax_open_kernel();
32606 set_pte(kmap_pte-idx, mk_pte(page, prot));
32607+ pax_close_kernel();
32608+
32609 arch_flush_lazy_mmu_mode();
32610
32611 return (void *)vaddr;
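
For readers new to PaX: on native x86 the pax_open_kernel()/pax_close_kernel() pair wrapped around set_pte() above essentially toggles CR0.WP so the kernel can briefly write through read-only page-table mappings. A sketch along the lines of the PaX design (the names and the paravirt indirection are assumptions here):

static inline unsigned long sketch_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: allow RO writes */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;	/* previous CR0, with WP set */
}

static inline unsigned long sketch_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* set WP again */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}
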
32612diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32613index 006cc91..bf05a83 100644
32614--- a/arch/x86/mm/hugetlbpage.c
32615+++ b/arch/x86/mm/hugetlbpage.c
32616@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32617 #ifdef CONFIG_HUGETLB_PAGE
32618 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32619 unsigned long addr, unsigned long len,
32620- unsigned long pgoff, unsigned long flags)
32621+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32622 {
32623 struct hstate *h = hstate_file(file);
32624 struct vm_unmapped_area_info info;
32625-
32626+
32627 info.flags = 0;
32628 info.length = len;
32629 info.low_limit = current->mm->mmap_legacy_base;
32630 info.high_limit = TASK_SIZE;
32631 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32632 info.align_offset = 0;
32633+ info.threadstack_offset = offset;
32634 return vm_unmapped_area(&info);
32635 }
32636
32637 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32638 unsigned long addr0, unsigned long len,
32639- unsigned long pgoff, unsigned long flags)
32640+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32641 {
32642 struct hstate *h = hstate_file(file);
32643 struct vm_unmapped_area_info info;
32644@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32645 info.high_limit = current->mm->mmap_base;
32646 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32647 info.align_offset = 0;
32648+ info.threadstack_offset = offset;
32649 addr = vm_unmapped_area(&info);
32650
32651 /*
32652@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32653 VM_BUG_ON(addr != -ENOMEM);
32654 info.flags = 0;
32655 info.low_limit = TASK_UNMAPPED_BASE;
32656+
32657+#ifdef CONFIG_PAX_RANDMMAP
32658+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32659+ info.low_limit += current->mm->delta_mmap;
32660+#endif
32661+
32662 info.high_limit = TASK_SIZE;
32663 addr = vm_unmapped_area(&info);
32664 }
32665@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32666 struct hstate *h = hstate_file(file);
32667 struct mm_struct *mm = current->mm;
32668 struct vm_area_struct *vma;
32669+ unsigned long pax_task_size = TASK_SIZE;
32670+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32671
32672 if (len & ~huge_page_mask(h))
32673 return -EINVAL;
32674- if (len > TASK_SIZE)
32675+
32676+#ifdef CONFIG_PAX_SEGMEXEC
32677+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32678+ pax_task_size = SEGMEXEC_TASK_SIZE;
32679+#endif
32680+
32681+ pax_task_size -= PAGE_SIZE;
32682+
32683+ if (len > pax_task_size)
32684 return -ENOMEM;
32685
32686 if (flags & MAP_FIXED) {
32687@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32688 return addr;
32689 }
32690
32691+#ifdef CONFIG_PAX_RANDMMAP
32692+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32693+#endif
32694+
32695 if (addr) {
32696 addr = ALIGN(addr, huge_page_size(h));
32697 vma = find_vma(mm, addr);
32698- if (TASK_SIZE - len >= addr &&
32699- (!vma || addr + len <= vma->vm_start))
32700+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32701 return addr;
32702 }
32703 if (mm->get_unmapped_area == arch_get_unmapped_area)
32704 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32705- pgoff, flags);
32706+ pgoff, flags, offset);
32707 else
32708 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32709- pgoff, flags);
32710+ pgoff, flags, offset);
32711 }
32712 #endif /* CONFIG_HUGETLB_PAGE */
32713
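check_heap_stack_gap() and gr_rand_threadstack_offset() are grsecurity helpers defined elsewhere in this patch. A hedged sketch of the contract the call sites above rely on (illustration only; the real version also honours a sysctl-controlled minimum gap):

static bool sketch_heap_stack_gap(const struct vm_area_struct *vma,
				  unsigned long addr, unsigned long len,
				  unsigned long offset)
{
	if (!vma)				/* nothing above: fine */
		return true;
	if (addr + len > vma->vm_start)		/* plain overlap */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep slack below a stack */
		return vma->vm_start - (addr + len) >= offset;
	return true;
}
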
32714diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32715index 079c3b6..7069023 100644
32716--- a/arch/x86/mm/init.c
32717+++ b/arch/x86/mm/init.c
32718@@ -4,6 +4,7 @@
32719 #include <linux/swap.h>
32720 #include <linux/memblock.h>
32721 #include <linux/bootmem.h> /* for max_low_pfn */
32722+#include <linux/tboot.h>
32723
32724 #include <asm/cacheflush.h>
32725 #include <asm/e820.h>
32726@@ -17,6 +18,8 @@
32727 #include <asm/proto.h>
32728 #include <asm/dma.h> /* for MAX_DMA_PFN */
32729 #include <asm/microcode.h>
32730+#include <asm/desc.h>
32731+#include <asm/bios_ebda.h>
32732
32733 /*
32734 * We need to define the tracepoints somewhere, and tlb.c
32735@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32736 early_ioremap_page_table_range_init();
32737 #endif
32738
32739+#ifdef CONFIG_PAX_PER_CPU_PGD
32740+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32741+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32742+ KERNEL_PGD_PTRS);
32743+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32744+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32745+ KERNEL_PGD_PTRS);
32746+ load_cr3(get_cpu_pgd(0, kernel));
32747+#else
32748 load_cr3(swapper_pg_dir);
32749+#endif
32750+
32751 __flush_tlb_all();
32752
32753 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32754@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32755 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32756 * mmio resources as well as potential bios/acpi data regions.
32757 */
32758+
32759+#ifdef CONFIG_GRKERNSEC_KMEM
32760+static unsigned int ebda_start __read_only;
32761+static unsigned int ebda_end __read_only;
32762+#endif
32763+
32764 int devmem_is_allowed(unsigned long pagenr)
32765 {
32766- if (pagenr < 256)
32767+#ifdef CONFIG_GRKERNSEC_KMEM
32768+ /* allow BDA */
32769+ if (!pagenr)
32770 return 1;
32771+ /* allow EBDA */
32772+ if (pagenr >= ebda_start && pagenr < ebda_end)
32773+ return 1;
32774+ /* if tboot is in use, allow access to its hardcoded serial log range */
32775+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32776+ return 1;
32777+#else
32778+ if (!pagenr)
32779+ return 1;
32780+#ifdef CONFIG_VM86
32781+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32782+ return 1;
32783+#endif
32784+#endif
32785+
32786+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32787+ return 1;
32788+#ifdef CONFIG_GRKERNSEC_KMEM
32789+ /* throw out everything else below 1MB */
32790+ if (pagenr <= 256)
32791+ return 0;
32792+#endif
32793 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32794 return 0;
32795 if (!page_is_ram(pagenr))
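A hypothetical userspace probe of the policy above: with CONFIG_GRKERNSEC_KMEM, page 0 (the BDA) stays readable via /dev/mem while an ordinary low page such as 0x1000 is refused (read returns -EPERM via drivers/char/mem.c once devmem_is_allowed() says no). Run as root:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	if (pread(fd, buf, sizeof(buf), 0x0) < 0)
		perror("BDA");		/* expected to succeed */
	if (pread(fd, buf, sizeof(buf), 0x1000) < 0)
		perror("page 1");	/* expected: Operation not permitted */
	close(fd);
	return 0;
}
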
32796@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32797 #endif
32798 }
32799
32800+#ifdef CONFIG_GRKERNSEC_KMEM
32801+static inline void gr_init_ebda(void)
32802+{
32803+ unsigned int ebda_addr;
32804+ unsigned int ebda_size = 0;
32805+
32806+ ebda_addr = get_bios_ebda();
32807+ if (ebda_addr) {
32808+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32809+ ebda_size <<= 10;
32810+ }
32811+ if (ebda_addr && ebda_size) {
32812+ ebda_start = ebda_addr >> PAGE_SHIFT;
32813+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32814+ } else {
32815+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32816+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32817+ }
32818+}
32819+#else
32820+static inline void gr_init_ebda(void) { }
32821+#endif
32822+
32823 void free_initmem(void)
32824 {
32825+#ifdef CONFIG_PAX_KERNEXEC
32826+#ifdef CONFIG_X86_32
32827+ /* PaX: limit KERNEL_CS to actual size */
32828+ unsigned long addr, limit;
32829+ struct desc_struct d;
32830+ int cpu;
32831+#else
32832+ pgd_t *pgd;
32833+ pud_t *pud;
32834+ pmd_t *pmd;
32835+ unsigned long addr, end;
32836+#endif
32837+#endif
32838+
32839+ gr_init_ebda();
32840+
32841+#ifdef CONFIG_PAX_KERNEXEC
32842+#ifdef CONFIG_X86_32
32843+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32844+ limit = (limit - 1UL) >> PAGE_SHIFT;
32845+
32846+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32847+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32848+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32849+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32850+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32851+ }
32852+
32853+ /* PaX: make KERNEL_CS read-only */
32854+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32855+ if (!paravirt_enabled())
32856+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32857+/*
32858+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32859+ pgd = pgd_offset_k(addr);
32860+ pud = pud_offset(pgd, addr);
32861+ pmd = pmd_offset(pud, addr);
32862+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32863+ }
32864+*/
32865+#ifdef CONFIG_X86_PAE
32866+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32867+/*
32868+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32869+ pgd = pgd_offset_k(addr);
32870+ pud = pud_offset(pgd, addr);
32871+ pmd = pmd_offset(pud, addr);
32872+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32873+ }
32874+*/
32875+#endif
32876+
32877+#ifdef CONFIG_MODULES
32878+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32879+#endif
32880+
32881+#else
32882+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32883+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32884+ pgd = pgd_offset_k(addr);
32885+ pud = pud_offset(pgd, addr);
32886+ pmd = pmd_offset(pud, addr);
32887+ if (!pmd_present(*pmd))
32888+ continue;
32889+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32890+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32891+ else
32892+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32893+ }
32894+
32895+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32896+ end = addr + KERNEL_IMAGE_SIZE;
32897+ for (; addr < end; addr += PMD_SIZE) {
32898+ pgd = pgd_offset_k(addr);
32899+ pud = pud_offset(pgd, addr);
32900+ pmd = pmd_offset(pud, addr);
32901+ if (!pmd_present(*pmd))
32902+ continue;
32903+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32904+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32905+ }
32906+#endif
32907+
32908+ flush_tlb_all();
32909+#endif
32910+
32911 free_init_pages("unused kernel",
32912 (unsigned long)(&__init_begin),
32913 (unsigned long)(&__init_end));
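Background for gr_init_ebda() above: the BIOS Data Area keeps the EBDA's real-mode segment in the word at physical 0x40e, and the first byte of the EBDA itself holds the area's size in KiB -- hence the '<< 10'. get_bios_ebda() from asm/bios_ebda.h is essentially:

static inline unsigned int sketch_get_bios_ebda(void)
{
	/* 0x40e = BDA word holding the EBDA real-mode segment */
	unsigned int address = *(unsigned short *)phys_to_virt(0x40E);

	return address << 4;	/* segment -> physical address */
}
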
32914diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32915index c8140e1..59257fc 100644
32916--- a/arch/x86/mm/init_32.c
32917+++ b/arch/x86/mm/init_32.c
32918@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32919 bool __read_mostly __vmalloc_start_set = false;
32920
32921 /*
32922- * Creates a middle page table and puts a pointer to it in the
32923- * given global directory entry. This only returns the gd entry
32924- * in non-PAE compilation mode, since the middle layer is folded.
32925- */
32926-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32927-{
32928- pud_t *pud;
32929- pmd_t *pmd_table;
32930-
32931-#ifdef CONFIG_X86_PAE
32932- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32933- pmd_table = (pmd_t *)alloc_low_page();
32934- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32935- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32936- pud = pud_offset(pgd, 0);
32937- BUG_ON(pmd_table != pmd_offset(pud, 0));
32938-
32939- return pmd_table;
32940- }
32941-#endif
32942- pud = pud_offset(pgd, 0);
32943- pmd_table = pmd_offset(pud, 0);
32944-
32945- return pmd_table;
32946-}
32947-
32948-/*
32949 * Create a page table and place a pointer to it in a middle page
32950 * directory entry:
32951 */
32952@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32953 pte_t *page_table = (pte_t *)alloc_low_page();
32954
32955 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32956+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32957+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32958+#else
32959 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32960+#endif
32961 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32962 }
32963
32964 return pte_offset_kernel(pmd, 0);
32965 }
32966
32967+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32968+{
32969+ pud_t *pud;
32970+ pmd_t *pmd_table;
32971+
32972+ pud = pud_offset(pgd, 0);
32973+ pmd_table = pmd_offset(pud, 0);
32974+
32975+ return pmd_table;
32976+}
32977+
32978 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32979 {
32980 int pgd_idx = pgd_index(vaddr);
32981@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32982 int pgd_idx, pmd_idx;
32983 unsigned long vaddr;
32984 pgd_t *pgd;
32985+ pud_t *pud;
32986 pmd_t *pmd;
32987 pte_t *pte = NULL;
32988 unsigned long count = page_table_range_init_count(start, end);
32989@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32990 pgd = pgd_base + pgd_idx;
32991
32992 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32993- pmd = one_md_table_init(pgd);
32994- pmd = pmd + pmd_index(vaddr);
32995+ pud = pud_offset(pgd, vaddr);
32996+ pmd = pmd_offset(pud, vaddr);
32997+
32998+#ifdef CONFIG_X86_PAE
32999+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33000+#endif
33001+
33002 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33003 pmd++, pmd_idx++) {
33004 pte = page_table_kmap_check(one_page_table_init(pmd),
33005@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33006 }
33007 }
33008
33009-static inline int is_kernel_text(unsigned long addr)
33010+static inline int is_kernel_text(unsigned long start, unsigned long end)
33011 {
33012- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33013- return 1;
33014- return 0;
33015+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33016+ end <= ktla_ktva((unsigned long)_stext)) &&
33017+ (start >= ktla_ktva((unsigned long)_einittext) ||
33018+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33019+
33020+#ifdef CONFIG_ACPI_SLEEP
33021+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33022+#endif
33023+
33024+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33025+ return 0;
33026+ return 1;
33027 }
33028
33029 /*
33030@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33031 unsigned long last_map_addr = end;
33032 unsigned long start_pfn, end_pfn;
33033 pgd_t *pgd_base = swapper_pg_dir;
33034- int pgd_idx, pmd_idx, pte_ofs;
33035+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33036 unsigned long pfn;
33037 pgd_t *pgd;
33038+ pud_t *pud;
33039 pmd_t *pmd;
33040 pte_t *pte;
33041 unsigned pages_2m, pages_4k;
33042@@ -291,8 +295,13 @@ repeat:
33043 pfn = start_pfn;
33044 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33045 pgd = pgd_base + pgd_idx;
33046- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33047- pmd = one_md_table_init(pgd);
33048+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33049+ pud = pud_offset(pgd, 0);
33050+ pmd = pmd_offset(pud, 0);
33051+
33052+#ifdef CONFIG_X86_PAE
33053+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33054+#endif
33055
33056 if (pfn >= end_pfn)
33057 continue;
33058@@ -304,14 +313,13 @@ repeat:
33059 #endif
33060 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33061 pmd++, pmd_idx++) {
33062- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33063+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33064
33065 /*
33066 * Map with big pages if possible, otherwise
33067 * create normal page tables:
33068 */
33069 if (use_pse) {
33070- unsigned int addr2;
33071 pgprot_t prot = PAGE_KERNEL_LARGE;
33072 /*
33073 * first pass will use the same initial
33074@@ -322,11 +330,7 @@ repeat:
33075 _PAGE_PSE);
33076
33077 pfn &= PMD_MASK >> PAGE_SHIFT;
33078- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33079- PAGE_OFFSET + PAGE_SIZE-1;
33080-
33081- if (is_kernel_text(addr) ||
33082- is_kernel_text(addr2))
33083+ if (is_kernel_text(address, address + PMD_SIZE))
33084 prot = PAGE_KERNEL_LARGE_EXEC;
33085
33086 pages_2m++;
33087@@ -343,7 +347,7 @@ repeat:
33088 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33089 pte += pte_ofs;
33090 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33091- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33092+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33093 pgprot_t prot = PAGE_KERNEL;
33094 /*
33095 * first pass will use the same initial
33096@@ -351,7 +355,7 @@ repeat:
33097 */
33098 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33099
33100- if (is_kernel_text(addr))
33101+ if (is_kernel_text(address, address + PAGE_SIZE))
33102 prot = PAGE_KERNEL_EXEC;
33103
33104 pages_4k++;
33105@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33106
33107 pud = pud_offset(pgd, va);
33108 pmd = pmd_offset(pud, va);
33109- if (!pmd_present(*pmd))
33110+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33111 break;
33112
33113 /* should not be large page here */
33114@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33115
33116 static void __init pagetable_init(void)
33117 {
33118- pgd_t *pgd_base = swapper_pg_dir;
33119-
33120- permanent_kmaps_init(pgd_base);
33121+ permanent_kmaps_init(swapper_pg_dir);
33122 }
33123
33124-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33125+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33126 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33127
33128 /* user-defined highmem size */
33129@@ -787,10 +789,10 @@ void __init mem_init(void)
33130 ((unsigned long)&__init_end -
33131 (unsigned long)&__init_begin) >> 10,
33132
33133- (unsigned long)&_etext, (unsigned long)&_edata,
33134- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33135+ (unsigned long)&_sdata, (unsigned long)&_edata,
33136+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33137
33138- (unsigned long)&_text, (unsigned long)&_etext,
33139+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33140 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33141
33142 /*
33143@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33144 if (!kernel_set_to_readonly)
33145 return;
33146
33147+ start = ktla_ktva(start);
33148 pr_debug("Set kernel text: %lx - %lx for read write\n",
33149 start, start+size);
33150
33151@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33152 if (!kernel_set_to_readonly)
33153 return;
33154
33155+ start = ktla_ktva(start);
33156 pr_debug("Set kernel text: %lx - %lx for read only\n",
33157 start, start+size);
33158
33159@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33160 unsigned long start = PFN_ALIGN(_text);
33161 unsigned long size = PFN_ALIGN(_etext) - start;
33162
33163+ start = ktla_ktva(start);
33164 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33165 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33166 size >> 10);
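
Reading aid for the rewritten is_kernel_text() above: each clause is the negation of a half-open interval-overlap test, so the function returns 1 iff [start, end) overlaps the text, inittext, ACPI-wakeup or low-BIOS ranges. The equivalent helper, spelled out:

static inline int ranges_overlap(unsigned long s1, unsigned long e1,
				 unsigned long s2, unsigned long e2)
{
	return s1 < e2 && s2 < e1;	/* half-open [s, e) intervals */
}
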
33167diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33168index 30eb05a..ae671ac 100644
33169--- a/arch/x86/mm/init_64.c
33170+++ b/arch/x86/mm/init_64.c
33171@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33172 * around without checking the pgd every time.
33173 */
33174
33175-pteval_t __supported_pte_mask __read_mostly = ~0;
33176+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33177 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33178
33179 int force_personality32;
33180@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33181
33182 for (address = start; address <= end; address += PGDIR_SIZE) {
33183 const pgd_t *pgd_ref = pgd_offset_k(address);
33184+
33185+#ifdef CONFIG_PAX_PER_CPU_PGD
33186+ unsigned long cpu;
33187+#else
33188 struct page *page;
33189+#endif
33190
33191 /*
33192 * When it is called after memory hot remove, pgd_none()
33193@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33194 continue;
33195
33196 spin_lock(&pgd_lock);
33197+
33198+#ifdef CONFIG_PAX_PER_CPU_PGD
33199+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33200+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33201+
33202+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33203+ BUG_ON(pgd_page_vaddr(*pgd)
33204+ != pgd_page_vaddr(*pgd_ref));
33205+
33206+ if (removed) {
33207+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33208+ pgd_clear(pgd);
33209+ } else {
33210+ if (pgd_none(*pgd))
33211+ set_pgd(pgd, *pgd_ref);
33212+ }
33213+
33214+ pgd = pgd_offset_cpu(cpu, kernel, address);
33215+#else
33216 list_for_each_entry(page, &pgd_list, lru) {
33217 pgd_t *pgd;
33218 spinlock_t *pgt_lock;
33219@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33220 /* the pgt_lock only for Xen */
33221 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33222 spin_lock(pgt_lock);
33223+#endif
33224
33225 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33226 BUG_ON(pgd_page_vaddr(*pgd)
33227@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33228 set_pgd(pgd, *pgd_ref);
33229 }
33230
33231+#ifndef CONFIG_PAX_PER_CPU_PGD
33232 spin_unlock(pgt_lock);
33233+#endif
33234+
33235 }
33236 spin_unlock(&pgd_lock);
33237 }
33238@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33239 {
33240 if (pgd_none(*pgd)) {
33241 pud_t *pud = (pud_t *)spp_getpage();
33242- pgd_populate(&init_mm, pgd, pud);
33243+ pgd_populate_kernel(&init_mm, pgd, pud);
33244 if (pud != pud_offset(pgd, 0))
33245 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33246 pud, pud_offset(pgd, 0));
33247@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33248 {
33249 if (pud_none(*pud)) {
33250 pmd_t *pmd = (pmd_t *) spp_getpage();
33251- pud_populate(&init_mm, pud, pmd);
33252+ pud_populate_kernel(&init_mm, pud, pmd);
33253 if (pmd != pmd_offset(pud, 0))
33254 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33255 pmd, pmd_offset(pud, 0));
33256@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33257 pmd = fill_pmd(pud, vaddr);
33258 pte = fill_pte(pmd, vaddr);
33259
33260+ pax_open_kernel();
33261 set_pte(pte, new_pte);
33262+ pax_close_kernel();
33263
33264 /*
33265 * It's enough to flush this one mapping.
33266@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33267 pgd = pgd_offset_k((unsigned long)__va(phys));
33268 if (pgd_none(*pgd)) {
33269 pud = (pud_t *) spp_getpage();
33270- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33271- _PAGE_USER));
33272+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33273 }
33274 pud = pud_offset(pgd, (unsigned long)__va(phys));
33275 if (pud_none(*pud)) {
33276 pmd = (pmd_t *) spp_getpage();
33277- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33278- _PAGE_USER));
33279+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33280 }
33281 pmd = pmd_offset(pud, phys);
33282 BUG_ON(!pmd_none(*pmd));
33283@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33284 prot);
33285
33286 spin_lock(&init_mm.page_table_lock);
33287- pud_populate(&init_mm, pud, pmd);
33288+ pud_populate_kernel(&init_mm, pud, pmd);
33289 spin_unlock(&init_mm.page_table_lock);
33290 }
33291 __flush_tlb_all();
33292@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33293 page_size_mask);
33294
33295 spin_lock(&init_mm.page_table_lock);
33296- pgd_populate(&init_mm, pgd, pud);
33297+ pgd_populate_kernel(&init_mm, pgd, pud);
33298 spin_unlock(&init_mm.page_table_lock);
33299 pgd_changed = true;
33300 }
33301diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33302index 9ca35fc..4b2b7b7 100644
33303--- a/arch/x86/mm/iomap_32.c
33304+++ b/arch/x86/mm/iomap_32.c
33305@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33306 type = kmap_atomic_idx_push();
33307 idx = type + KM_TYPE_NR * smp_processor_id();
33308 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33309+
33310+ pax_open_kernel();
33311 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33312+ pax_close_kernel();
33313+
33314 arch_flush_lazy_mmu_mode();
33315
33316 return (void *)vaddr;
33317diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33318index fdf617c..b9e85bc 100644
33319--- a/arch/x86/mm/ioremap.c
33320+++ b/arch/x86/mm/ioremap.c
33321@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33322 unsigned long i;
33323
33324 for (i = 0; i < nr_pages; ++i)
33325- if (pfn_valid(start_pfn + i) &&
33326- !PageReserved(pfn_to_page(start_pfn + i)))
33327+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33328+ !PageReserved(pfn_to_page(start_pfn + i))))
33329 return 1;
33330
33331 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33332@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33333 *
33334 * Caller must ensure there is only one unmapping for the same pointer.
33335 */
33336-void iounmap(volatile void __iomem *addr)
33337+void iounmap(const volatile void __iomem *addr)
33338 {
33339 struct vm_struct *p, *o;
33340
33341@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33342 */
33343 void *xlate_dev_mem_ptr(phys_addr_t phys)
33344 {
33345- void *addr;
33346- unsigned long start = phys & PAGE_MASK;
33347-
33348 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33349- if (page_is_ram(start >> PAGE_SHIFT))
33350+ if (page_is_ram(phys >> PAGE_SHIFT))
33351+#ifdef CONFIG_HIGHMEM
33352+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33353+#endif
33354 return __va(phys);
33355
33356- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33357- if (addr)
33358- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33359-
33360- return addr;
33361+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33362 }
33363
33364 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33365 {
33366 if (page_is_ram(phys >> PAGE_SHIFT))
33367+#ifdef CONFIG_HIGHMEM
33368+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33369+#endif
33370 return;
33371
33372 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33373 return;
33374 }
33375
33376-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33377+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33378
33379 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33380 {
33381@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33382 early_ioremap_setup();
33383
33384 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33385- memset(bm_pte, 0, sizeof(bm_pte));
33386- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33387+ pmd_populate_user(&init_mm, pmd, bm_pte);
33388
33389 /*
33390 * The boot-ioremap range spans multiple pmds, for which
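Usage pattern for the xlate/unxlate pair changed above, as in drivers/char/mem.c (hedged sketch): map a physical page, copy, unmap. The patch narrows the __va() shortcut to lowmem RAM so highmem pages go through ioremap_cache() just like non-RAM does.

static ssize_t sketch_read_phys(phys_addr_t phys, void *buf, size_t count)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	memcpy(buf, ptr, count);
	unxlate_dev_mem_ptr(phys, ptr);
	return count;
}
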
33391diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33392index b4f2e7e..96c9c3e 100644
33393--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33394+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33395@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33396 * memory (e.g. tracked pages)? For now, we need this to avoid
33397 * invoking kmemcheck for PnP BIOS calls.
33398 */
33399- if (regs->flags & X86_VM_MASK)
33400+ if (v8086_mode(regs))
33401 return false;
33402- if (regs->cs != __KERNEL_CS)
33403+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33404 return false;
33405
33406 pte = kmemcheck_pte_lookup(address);
33407diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33408index df4552b..12c129c 100644
33409--- a/arch/x86/mm/mmap.c
33410+++ b/arch/x86/mm/mmap.c
33411@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33412 * Leave an at least ~128 MB hole with possible stack randomization.
33413 */
33414 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33415-#define MAX_GAP (TASK_SIZE/6*5)
33416+#define MAX_GAP (pax_task_size/6*5)
33417
33418 static int mmap_is_legacy(void)
33419 {
33420@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33421 return rnd << PAGE_SHIFT;
33422 }
33423
33424-static unsigned long mmap_base(void)
33425+static unsigned long mmap_base(struct mm_struct *mm)
33426 {
33427 unsigned long gap = rlimit(RLIMIT_STACK);
33428+ unsigned long pax_task_size = TASK_SIZE;
33429+
33430+#ifdef CONFIG_PAX_SEGMEXEC
33431+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33432+ pax_task_size = SEGMEXEC_TASK_SIZE;
33433+#endif
33434
33435 if (gap < MIN_GAP)
33436 gap = MIN_GAP;
33437 else if (gap > MAX_GAP)
33438 gap = MAX_GAP;
33439
33440- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33441+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33442 }
33443
33444 /*
33445 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33446 * does, but not when emulating X86_32
33447 */
33448-static unsigned long mmap_legacy_base(void)
33449+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33450 {
33451- if (mmap_is_ia32())
33452+ if (mmap_is_ia32()) {
33453+
33454+#ifdef CONFIG_PAX_SEGMEXEC
33455+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33456+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33457+ else
33458+#endif
33459+
33460 return TASK_UNMAPPED_BASE;
33461- else
33462+ } else
33463 return TASK_UNMAPPED_BASE + mmap_rnd();
33464 }
33465
33466@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33467 */
33468 void arch_pick_mmap_layout(struct mm_struct *mm)
33469 {
33470- mm->mmap_legacy_base = mmap_legacy_base();
33471- mm->mmap_base = mmap_base();
33472+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33473+ mm->mmap_base = mmap_base(mm);
33474+
33475+#ifdef CONFIG_PAX_RANDMMAP
33476+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33477+ mm->mmap_legacy_base += mm->delta_mmap;
33478+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33479+ }
33480+#endif
33481
33482 if (mmap_is_legacy()) {
33483 mm->mmap_base = mm->mmap_legacy_base;
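Worked example for the mmap_base() change above (editorial; SEGMEXEC_TASK_SIZE is assumed to be 0x60000000, half of the 3 GB i386 TASK_SIZE, and randomization is zeroed for clarity):

#include <stdio.h>

#define PAGE_ALIGN(x)	(((x) + 4095UL) & ~4095UL)

int main(void)
{
	unsigned long pax_task_size = 0x60000000UL;	/* SEGMEXEC (assumed) */
	unsigned long gap = 8UL << 20;			/* RLIMIT_STACK = 8 MB */
	unsigned long min_gap = 128UL << 20;		/* + stack_maxrandom_size() */
	unsigned long rnd = 0;				/* mmap_rnd() ignored */

	if (gap < min_gap)
		gap = min_gap;
	/* prints 0x58000000: the base sits below the SS/CS mirror half */
	printf("mmap_base = %#lx\n", PAGE_ALIGN(pax_task_size - gap - rnd));
	return 0;
}
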
33484diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33485index 0057a7a..95c7edd 100644
33486--- a/arch/x86/mm/mmio-mod.c
33487+++ b/arch/x86/mm/mmio-mod.c
33488@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33489 break;
33490 default:
33491 {
33492- unsigned char *ip = (unsigned char *)instptr;
33493+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33494 my_trace->opcode = MMIO_UNKNOWN_OP;
33495 my_trace->width = 0;
33496 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33497@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33498 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33499 void __iomem *addr)
33500 {
33501- static atomic_t next_id;
33502+ static atomic_unchecked_t next_id;
33503 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33504 /* These are page-unaligned. */
33505 struct mmiotrace_map map = {
33506@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33507 .private = trace
33508 },
33509 .phys = offset,
33510- .id = atomic_inc_return(&next_id)
33511+ .id = atomic_inc_return_unchecked(&next_id)
33512 };
33513 map.map_id = trace->id;
33514
33515@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33516 ioremap_trace_core(offset, size, addr);
33517 }
33518
33519-static void iounmap_trace_core(volatile void __iomem *addr)
33520+static void iounmap_trace_core(const volatile void __iomem *addr)
33521 {
33522 struct mmiotrace_map map = {
33523 .phys = 0,
33524@@ -328,7 +328,7 @@ not_enabled:
33525 }
33526 }
33527
33528-void mmiotrace_iounmap(volatile void __iomem *addr)
33529+void mmiotrace_iounmap(const volatile void __iomem *addr)
33530 {
33531 might_sleep();
33532 if (is_enabled()) /* recheck and proper locking in *_core() */
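On the atomic_unchecked_t conversion above: under PAX_REFCOUNT, plain atomic_t overflows trigger a report, and next_id is a pure sequence counter whose wraparound is harmless, so it opts out via the _unchecked variant. The intended contrast, sketched:

static atomic_t obj_refcount;		/* overflow would be a bug: checked   */
static atomic_unchecked_t trace_seq;	/* overflow is benign: stays unchecked */

static inline int next_trace_id(void)
{
	return atomic_inc_return_unchecked(&trace_seq);
}
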
33533diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33534index 1a88370..3f598b5 100644
33535--- a/arch/x86/mm/numa.c
33536+++ b/arch/x86/mm/numa.c
33537@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33538 }
33539 }
33540
33541-static int __init numa_register_memblks(struct numa_meminfo *mi)
33542+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33543 {
33544 unsigned long uninitialized_var(pfn_align);
33545 int i, nid;
33546diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33547index 536ea2f..f42c293 100644
33548--- a/arch/x86/mm/pageattr.c
33549+++ b/arch/x86/mm/pageattr.c
33550@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33551 */
33552 #ifdef CONFIG_PCI_BIOS
33553 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33554- pgprot_val(forbidden) |= _PAGE_NX;
33555+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33556 #endif
33557
33558 /*
33559@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33560 * Does not cover __inittext since that is gone later on. On
33561 * 64bit we do not enforce !NX on the low mapping
33562 */
33563- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33564- pgprot_val(forbidden) |= _PAGE_NX;
33565+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33566+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33567
33568+#ifdef CONFIG_DEBUG_RODATA
33569 /*
33570 * The .rodata section needs to be read-only. Using the pfn
33571 * catches all aliases.
33572@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33573 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33574 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33575 pgprot_val(forbidden) |= _PAGE_RW;
33576+#endif
33577
33578 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33579 /*
33580@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33581 }
33582 #endif
33583
33584+#ifdef CONFIG_PAX_KERNEXEC
33585+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33586+ pgprot_val(forbidden) |= _PAGE_RW;
33587+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33588+ }
33589+#endif
33590+
33591 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33592
33593 return prot;
33594@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33595 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33596 {
33597 /* change init_mm */
33598+ pax_open_kernel();
33599 set_pte_atomic(kpte, pte);
33600+
33601 #ifdef CONFIG_X86_32
33602 if (!SHARED_KERNEL_PMD) {
33603+
33604+#ifdef CONFIG_PAX_PER_CPU_PGD
33605+ unsigned long cpu;
33606+#else
33607 struct page *page;
33608+#endif
33609
33610+#ifdef CONFIG_PAX_PER_CPU_PGD
33611+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33612+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33613+#else
33614 list_for_each_entry(page, &pgd_list, lru) {
33615- pgd_t *pgd;
33616+ pgd_t *pgd = (pgd_t *)page_address(page);
33617+#endif
33618+
33619 pud_t *pud;
33620 pmd_t *pmd;
33621
33622- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33623+ pgd += pgd_index(address);
33624 pud = pud_offset(pgd, address);
33625 pmd = pmd_offset(pud, address);
33626 set_pte_atomic((pte_t *)pmd, pte);
33627 }
33628 }
33629 #endif
33630+ pax_close_kernel();
33631 }
33632
33633 static int
33634diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33635index 7ac6869..c0ba541 100644
33636--- a/arch/x86/mm/pat.c
33637+++ b/arch/x86/mm/pat.c
33638@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33639 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33640
33641 if (pg_flags == _PGMT_DEFAULT)
33642- return -1;
33643+ return _PAGE_CACHE_MODE_NUM;
33644 else if (pg_flags == _PGMT_WC)
33645 return _PAGE_CACHE_MODE_WC;
33646 else if (pg_flags == _PGMT_UC_MINUS)
33647@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33648
33649 page = pfn_to_page(pfn);
33650 type = get_page_memtype(page);
33651- if (type != -1) {
33652+ if (type != _PAGE_CACHE_MODE_NUM) {
33653 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33654 start, end - 1, type, req_type);
33655 if (new_type)
33656@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33657
33658 if (!entry) {
33659 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33660- current->comm, current->pid, start, end - 1);
33661+ current->comm, task_pid_nr(current), start, end - 1);
33662 return -EINVAL;
33663 }
33664
33665@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33666 page = pfn_to_page(paddr >> PAGE_SHIFT);
33667 rettype = get_page_memtype(page);
33668 /*
33669- * -1 from get_page_memtype() implies RAM page is in its
33670+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33671 * default state and not reserved, and hence of type WB
33672 */
33673- if (rettype == -1)
33674+ if (rettype == _PAGE_CACHE_MODE_NUM)
33675 rettype = _PAGE_CACHE_MODE_WB;
33676
33677 return rettype;
33678@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33679
33680 while (cursor < to) {
33681 if (!devmem_is_allowed(pfn)) {
33682- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33683- current->comm, from, to - 1);
33684+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33685+ current->comm, from, to - 1, cursor);
33686 return 0;
33687 }
33688 cursor += PAGE_SIZE;
33689@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33690 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33691 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33692 "for [mem %#010Lx-%#010Lx]\n",
33693- current->comm, current->pid,
33694+ current->comm, task_pid_nr(current),
33695 cattr_name(pcm),
33696 base, (unsigned long long)(base + size-1));
33697 return -EINVAL;
33698@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33699 pcm = lookup_memtype(paddr);
33700 if (want_pcm != pcm) {
33701 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33702- current->comm, current->pid,
33703+ current->comm, task_pid_nr(current),
33704 cattr_name(want_pcm),
33705 (unsigned long long)paddr,
33706 (unsigned long long)(paddr + size - 1),
33707@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33708 free_memtype(paddr, paddr + size);
33709 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33710 " for [mem %#010Lx-%#010Lx], got %s\n",
33711- current->comm, current->pid,
33712+ current->comm, task_pid_nr(current),
33713 cattr_name(want_pcm),
33714 (unsigned long long)paddr,
33715 (unsigned long long)(paddr + size - 1),
33716diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33717index 6582adc..fcc5d0b 100644
33718--- a/arch/x86/mm/pat_rbtree.c
33719+++ b/arch/x86/mm/pat_rbtree.c
33720@@ -161,7 +161,7 @@ success:
33721
33722 failure:
33723 printk(KERN_INFO "%s:%d conflicting memory types "
33724- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33725+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33726 end, cattr_name(found_type), cattr_name(match->type));
33727 return -EBUSY;
33728 }
33729diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33730index 9f0614d..92ae64a 100644
33731--- a/arch/x86/mm/pf_in.c
33732+++ b/arch/x86/mm/pf_in.c
33733@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33734 int i;
33735 enum reason_type rv = OTHERS;
33736
33737- p = (unsigned char *)ins_addr;
33738+ p = (unsigned char *)ktla_ktva(ins_addr);
33739 p += skip_prefix(p, &prf);
33740 p += get_opcode(p, &opcode);
33741
33742@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33743 struct prefix_bits prf;
33744 int i;
33745
33746- p = (unsigned char *)ins_addr;
33747+ p = (unsigned char *)ktla_ktva(ins_addr);
33748 p += skip_prefix(p, &prf);
33749 p += get_opcode(p, &opcode);
33750
33751@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33752 struct prefix_bits prf;
33753 int i;
33754
33755- p = (unsigned char *)ins_addr;
33756+ p = (unsigned char *)ktla_ktva(ins_addr);
33757 p += skip_prefix(p, &prf);
33758 p += get_opcode(p, &opcode);
33759
33760@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33761 struct prefix_bits prf;
33762 int i;
33763
33764- p = (unsigned char *)ins_addr;
33765+ p = (unsigned char *)ktla_ktva(ins_addr);
33766 p += skip_prefix(p, &prf);
33767 p += get_opcode(p, &opcode);
33768 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33769@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33770 struct prefix_bits prf;
33771 int i;
33772
33773- p = (unsigned char *)ins_addr;
33774+ p = (unsigned char *)ktla_ktva(ins_addr);
33775 p += skip_prefix(p, &prf);
33776 p += get_opcode(p, &opcode);
33777 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33778diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33779index 6fb6927..4fc13c0 100644
33780--- a/arch/x86/mm/pgtable.c
33781+++ b/arch/x86/mm/pgtable.c
33782@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33783 list_del(&page->lru);
33784 }
33785
33786-#define UNSHARED_PTRS_PER_PGD \
33787- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33788+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33789+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33790
33791+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33792+{
33793+ unsigned int count = USER_PGD_PTRS;
33794
33795+ if (!pax_user_shadow_base)
33796+ return;
33797+
33798+ while (count--)
33799+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33800+}
33801+#endif
33802+
33803+#ifdef CONFIG_PAX_PER_CPU_PGD
33804+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33805+{
33806+ unsigned int count = USER_PGD_PTRS;
33807+
33808+ while (count--) {
33809+ pgd_t pgd;
33810+
33811+#ifdef CONFIG_X86_64
33812+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33813+#else
33814+ pgd = *src++;
33815+#endif
33816+
33817+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33818+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33819+#endif
33820+
33821+ *dst++ = pgd;
33822+ }
33823+
33824+}
33825+#endif
33826+
33827+#ifdef CONFIG_X86_64
33828+#define pxd_t pud_t
33829+#define pyd_t pgd_t
33830+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33831+#define pgtable_pxd_page_ctor(page) true
33832+#define pgtable_pxd_page_dtor(page)
33833+#define pxd_free(mm, pud) pud_free((mm), (pud))
33834+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33835+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33836+#define PYD_SIZE PGDIR_SIZE
33837+#else
33838+#define pxd_t pmd_t
33839+#define pyd_t pud_t
33840+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33841+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33842+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33843+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33844+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33845+#define pyd_offset(mm, address) pud_offset((mm), (address))
33846+#define PYD_SIZE PUD_SIZE
33847+#endif
33848+
33849+#ifdef CONFIG_PAX_PER_CPU_PGD
33850+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33851+static inline void pgd_dtor(pgd_t *pgd) {}
33852+#else
33853 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33854 {
33855 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33856@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33857 pgd_list_del(pgd);
33858 spin_unlock(&pgd_lock);
33859 }
33860+#endif
33861
33862 /*
33863 * List of all pgd's needed for non-PAE so it can invalidate entries
33864@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33865 * -- nyc
33866 */
33867
33868-#ifdef CONFIG_X86_PAE
33869+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33870 /*
33871 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33872 * updating the top-level pagetable entries to guarantee the
33873@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33874 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33875 * and initialize the kernel pmds here.
33876 */
33877-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33878+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33879
33880 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33881 {
33882@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33883 */
33884 flush_tlb_mm(mm);
33885 }
33886+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33887+#define PREALLOCATED_PXDS USER_PGD_PTRS
33888 #else /* !CONFIG_X86_PAE */
33889
33890 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33891-#define PREALLOCATED_PMDS 0
33892+#define PREALLOCATED_PXDS 0
33893
33894 #endif /* CONFIG_X86_PAE */
33895
33896-static void free_pmds(pmd_t *pmds[])
33897+static void free_pxds(pxd_t *pxds[])
33898 {
33899 int i;
33900
33901- for(i = 0; i < PREALLOCATED_PMDS; i++)
33902- if (pmds[i]) {
33903- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33904- free_page((unsigned long)pmds[i]);
33905+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33906+ if (pxds[i]) {
33907+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33908+ free_page((unsigned long)pxds[i]);
33909 }
33910 }
33911
33912-static int preallocate_pmds(pmd_t *pmds[])
33913+static int preallocate_pxds(pxd_t *pxds[])
33914 {
33915 int i;
33916 bool failed = false;
33917
33918- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33919- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33920- if (!pmd)
33921+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33922+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33923+ if (!pxd)
33924 failed = true;
33925- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33926- free_page((unsigned long)pmd);
33927- pmd = NULL;
33928+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33929+ free_page((unsigned long)pxd);
33930+ pxd = NULL;
33931 failed = true;
33932 }
33933- pmds[i] = pmd;
33934+ pxds[i] = pxd;
33935 }
33936
33937 if (failed) {
33938- free_pmds(pmds);
33939+ free_pxds(pxds);
33940 return -ENOMEM;
33941 }
33942
33943@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33944 * preallocate which never got a corresponding vma will need to be
33945 * freed manually.
33946 */
33947-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33948+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33949 {
33950 int i;
33951
33952- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33953+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33954 pgd_t pgd = pgdp[i];
33955
33956 if (pgd_val(pgd) != 0) {
33957- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33958+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33959
33960- pgdp[i] = native_make_pgd(0);
33961+ set_pgd(pgdp + i, native_make_pgd(0));
33962
33963- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33964- pmd_free(mm, pmd);
33965+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33966+ pxd_free(mm, pxd);
33967 }
33968 }
33969 }
33970
33971-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33972+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33973 {
33974- pud_t *pud;
33975+ pyd_t *pyd;
33976 int i;
33977
33978- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33979+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33980 return;
33981
33982- pud = pud_offset(pgd, 0);
33983-
33984- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33985- pmd_t *pmd = pmds[i];
33986+#ifdef CONFIG_X86_64
33987+ pyd = pyd_offset(mm, 0L);
33988+#else
33989+ pyd = pyd_offset(pgd, 0L);
33990+#endif
33991
33992+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33993+ pxd_t *pxd = pxds[i];
33994 if (i >= KERNEL_PGD_BOUNDARY)
33995- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33996- sizeof(pmd_t) * PTRS_PER_PMD);
33997+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33998+ sizeof(pxd_t) * PTRS_PER_PMD);
33999
34000- pud_populate(mm, pud, pmd);
34001+ pyd_populate(mm, pyd, pxd);
34002 }
34003 }
34004
34005 pgd_t *pgd_alloc(struct mm_struct *mm)
34006 {
34007 pgd_t *pgd;
34008- pmd_t *pmds[PREALLOCATED_PMDS];
34009+ pxd_t *pxds[PREALLOCATED_PXDS];
34010
34011 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34012
34013@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34014
34015 mm->pgd = pgd;
34016
34017- if (preallocate_pmds(pmds) != 0)
34018+ if (preallocate_pxds(pxds) != 0)
34019 goto out_free_pgd;
34020
34021 if (paravirt_pgd_alloc(mm) != 0)
34022- goto out_free_pmds;
34023+ goto out_free_pxds;
34024
34025 /*
34026 * Make sure that pre-populating the pmds is atomic with
34027@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34028 spin_lock(&pgd_lock);
34029
34030 pgd_ctor(mm, pgd);
34031- pgd_prepopulate_pmd(mm, pgd, pmds);
34032+ pgd_prepopulate_pxd(mm, pgd, pxds);
34033
34034 spin_unlock(&pgd_lock);
34035
34036 return pgd;
34037
34038-out_free_pmds:
34039- free_pmds(pmds);
34040+out_free_pxds:
34041+ free_pxds(pxds);
34042 out_free_pgd:
34043 free_page((unsigned long)pgd);
34044 out:
34045@@ -313,7 +380,7 @@ out:
34046
34047 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34048 {
34049- pgd_mop_up_pmds(mm, pgd);
34050+ pgd_mop_up_pxds(mm, pgd);
34051 pgd_dtor(pgd);
34052 paravirt_pgd_free(mm, pgd);
34053 free_page((unsigned long)pgd);
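The pgtable.c rewrite folds the preallocation variants into one code path by calling the level being preallocated "pxd" and its parent "pyd"; the macro table near the top of the diff binds those aliases per configuration. An illustration of how they expand:

/* From the macro table above:
 *
 *              32-bit (PAE)        64-bit (PER_CPU_PGD)
 *   pxd_t      pmd_t               pud_t
 *   pyd_t      pud_t               pgd_t
 *   PYD_SIZE   PUD_SIZE            PGDIR_SIZE
 *
 * so preallocate_pxds()/free_pxds()/pgd_prepopulate_pxd() run the
 * same loops whether they are stocking pmds under puds or puds
 * under per-CPU pgds. */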
34054diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34055index 75cc097..79a097f 100644
34056--- a/arch/x86/mm/pgtable_32.c
34057+++ b/arch/x86/mm/pgtable_32.c
34058@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34059 return;
34060 }
34061 pte = pte_offset_kernel(pmd, vaddr);
34062+
34063+ pax_open_kernel();
34064 if (pte_val(pteval))
34065 set_pte_at(&init_mm, vaddr, pte, pteval);
34066 else
34067 pte_clear(&init_mm, vaddr, pte);
34068+ pax_close_kernel();
34069
34070 /*
34071 * It's enough to flush this one mapping.
34072diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34073index e666cbb..61788c45 100644
34074--- a/arch/x86/mm/physaddr.c
34075+++ b/arch/x86/mm/physaddr.c
34076@@ -10,7 +10,7 @@
34077 #ifdef CONFIG_X86_64
34078
34079 #ifdef CONFIG_DEBUG_VIRTUAL
34080-unsigned long __phys_addr(unsigned long x)
34081+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34082 {
34083 unsigned long y = x - __START_KERNEL_map;
34084
34085@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34086 #else
34087
34088 #ifdef CONFIG_DEBUG_VIRTUAL
34089-unsigned long __phys_addr(unsigned long x)
34090+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34091 {
34092 unsigned long phys_addr = x - PAGE_OFFSET;
34093 /* VMALLOC_* aren't constants */
34094diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34095index 90555bf..f5f1828 100644
34096--- a/arch/x86/mm/setup_nx.c
34097+++ b/arch/x86/mm/setup_nx.c
34098@@ -5,8 +5,10 @@
34099 #include <asm/pgtable.h>
34100 #include <asm/proto.h>
34101
34102+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34103 static int disable_nx;
34104
34105+#ifndef CONFIG_PAX_PAGEEXEC
34106 /*
34107 * noexec = on|off
34108 *
34109@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34110 return 0;
34111 }
34112 early_param("noexec", noexec_setup);
34113+#endif
34114+
34115+#endif
34116
34117 void x86_configure_nx(void)
34118 {
34119+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34120 if (cpu_has_nx && !disable_nx)
34121 __supported_pte_mask |= _PAGE_NX;
34122 else
34123+#endif
34124 __supported_pte_mask &= ~_PAGE_NX;
34125 }
34126
34127diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34128index ee61c36..e6fedeb 100644
34129--- a/arch/x86/mm/tlb.c
34130+++ b/arch/x86/mm/tlb.c
34131@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34132 BUG();
34133 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34134 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34135+
34136+#ifndef CONFIG_PAX_PER_CPU_PGD
34137 load_cr3(swapper_pg_dir);
34138+#endif
34139+
34140 /*
34141 * This gets called in the idle path where RCU
34142 * functions differently. Tracing normally
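Compiling out load_cr3(swapper_pg_dir) in leave_mm() preserves the PER_CPU_PGD invariant: each CPU stays on its private kernel PGD (the get_cpu_pgd(cpu, kernel) seen elsewhere in this patch) even when idle, since reloading the global swapper_pg_dir would abandon the per-CPU shadow mappings that UDEREF depends on. Schematically, the guarded reload reduces to:

#ifdef CONFIG_PAX_PER_CPU_PGD
	/* CR3 already points at get_cpu_pgd(cpu, kernel); keep it */
#else
	load_cr3(swapper_pg_dir);
#endif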
34143diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34144new file mode 100644
34145index 0000000..dace51c
34146--- /dev/null
34147+++ b/arch/x86/mm/uderef_64.c
34148@@ -0,0 +1,37 @@
34149+#include <linux/mm.h>
34150+#include <asm/pgtable.h>
34151+#include <asm/uaccess.h>
34152+
34153+#ifdef CONFIG_PAX_MEMORY_UDEREF
34154+/* PaX: due to the special call convention these functions must
34155+ * - remain leaf functions under all configurations,
34156+ * - never be called directly, only dereferenced from the wrappers.
34157+ */
34158+void __pax_open_userland(void)
34159+{
34160+ unsigned int cpu;
34161+
34162+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34163+ return;
34164+
34165+ cpu = raw_get_cpu();
34166+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34167+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34168+ raw_put_cpu_no_resched();
34169+}
34170+EXPORT_SYMBOL(__pax_open_userland);
34171+
34172+void __pax_close_userland(void)
34173+{
34174+ unsigned int cpu;
34175+
34176+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34177+ return;
34178+
34179+ cpu = raw_get_cpu();
34180+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34181+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34182+ raw_put_cpu_no_resched();
34183+}
34184+EXPORT_SYMBOL(__pax_close_userland);
34185+#endif
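The two functions in the new uderef_64.c flip CR3 between the per-CPU kernel and user PGDs, using PCIDs with the no-flush bit so the switch costs no TLB invalidation. As the file comment says, they are reached only through wrappers; a hedged sketch of the intended bracketing around a userland access, assuming the wrapper names pax_open_userland()/pax_close_userland() and with the helper function itself hypothetical:

static inline int read_user_word(const unsigned long __user *p,
				 unsigned long *out)
{
	int ret;

	pax_open_userland();	/* CR3 -> get_cpu_pgd(cpu, user), PCID_USER */
	ret = __get_user(*out, p);
	pax_close_userland();	/* CR3 -> get_cpu_pgd(cpu, kernel) */

	return ret;
}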
34186diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34187index 6440221..f84b5c7 100644
34188--- a/arch/x86/net/bpf_jit.S
34189+++ b/arch/x86/net/bpf_jit.S
34190@@ -9,6 +9,7 @@
34191 */
34192 #include <linux/linkage.h>
34193 #include <asm/dwarf2.h>
34194+#include <asm/alternative-asm.h>
34195
34196 /*
34197 * Calling convention :
34198@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34199 jle bpf_slow_path_word
34200 mov (SKBDATA,%rsi),%eax
34201 bswap %eax /* ntohl() */
34202+ pax_force_retaddr
34203 ret
34204
34205 sk_load_half:
34206@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34207 jle bpf_slow_path_half
34208 movzwl (SKBDATA,%rsi),%eax
34209 rol $8,%ax # ntohs()
34210+ pax_force_retaddr
34211 ret
34212
34213 sk_load_byte:
34214@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34215 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34216 jle bpf_slow_path_byte
34217 movzbl (SKBDATA,%rsi),%eax
34218+ pax_force_retaddr
34219 ret
34220
34221 /* rsi contains offset and can be scratched */
34222@@ -90,6 +94,7 @@ bpf_slow_path_word:
34223 js bpf_error
34224 mov - MAX_BPF_STACK + 32(%rbp),%eax
34225 bswap %eax
34226+ pax_force_retaddr
34227 ret
34228
34229 bpf_slow_path_half:
34230@@ -98,12 +103,14 @@ bpf_slow_path_half:
34231 mov - MAX_BPF_STACK + 32(%rbp),%ax
34232 rol $8,%ax
34233 movzwl %ax,%eax
34234+ pax_force_retaddr
34235 ret
34236
34237 bpf_slow_path_byte:
34238 bpf_slow_path_common(1)
34239 js bpf_error
34240 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34241+ pax_force_retaddr
34242 ret
34243
34244 #define sk_negative_common(SIZE) \
34245@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34246 sk_negative_common(4)
34247 mov (%rax), %eax
34248 bswap %eax
34249+ pax_force_retaddr
34250 ret
34251
34252 bpf_slow_path_half_neg:
34253@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34254 mov (%rax),%ax
34255 rol $8,%ax
34256 movzwl %ax,%eax
34257+ pax_force_retaddr
34258 ret
34259
34260 bpf_slow_path_byte_neg:
34261@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34262 .globl sk_load_byte_negative_offset
34263 sk_negative_common(1)
34264 movzbl (%rax), %eax
34265+ pax_force_retaddr
34266 ret
34267
34268 bpf_error:
34269@@ -156,4 +166,5 @@ bpf_error:
34270 mov - MAX_BPF_STACK + 16(%rbp),%r14
34271 mov - MAX_BPF_STACK + 24(%rbp),%r15
34272 leaveq
34273+ pax_force_retaddr
34274 ret
34275diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34276index 9875143..00f6656 100644
34277--- a/arch/x86/net/bpf_jit_comp.c
34278+++ b/arch/x86/net/bpf_jit_comp.c
34279@@ -13,7 +13,11 @@
34280 #include <linux/if_vlan.h>
34281 #include <asm/cacheflush.h>
34282
34283+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34284+int bpf_jit_enable __read_only;
34285+#else
34286 int bpf_jit_enable __read_mostly;
34287+#endif
34288
34289 /*
34290 * assembly code in arch/x86/net/bpf_jit.S
34291@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34292 static void jit_fill_hole(void *area, unsigned int size)
34293 {
34294 /* fill whole space with int3 instructions */
34295+ pax_open_kernel();
34296 memset(area, 0xcc, size);
34297+ pax_close_kernel();
34298 }
34299
34300 struct jit_context {
34301@@ -896,7 +902,9 @@ common_load:
34302 pr_err("bpf_jit_compile fatal error\n");
34303 return -EFAULT;
34304 }
34305+ pax_open_kernel();
34306 memcpy(image + proglen, temp, ilen);
34307+ pax_close_kernel();
34308 }
34309 proglen += ilen;
34310 addrs[i] = proglen;
34311@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34312
34313 if (image) {
34314 bpf_flush_icache(header, image + proglen);
34315- set_memory_ro((unsigned long)header, header->pages);
34316 prog->bpf_func = (void *)image;
34317 prog->jited = true;
34318 }
34319@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34320 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34321 struct bpf_binary_header *header = (void *)addr;
34322
34323- if (!fp->jited)
34324- goto free_filter;
34325+ if (fp->jited)
34326+ bpf_jit_binary_free(header);
34327
34328- set_memory_rw(addr, header->pages);
34329- bpf_jit_binary_free(header);
34330-
34331-free_filter:
34332 bpf_prog_unlock_free(fp);
34333 }
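With GRKERNSEC_BPF_HARDEN the JIT image is treated as read-only from allocation, so the set_memory_ro()/set_memory_rw() round trips disappear and every store into the image is bracketed by pax_open_kernel()/pax_close_kernel(). Conceptually those helpers lift write protection for the current CPU only; a rough sketch under that assumption — this is not the real PaX implementation, and the function names are made up:

#include <linux/preempt.h>
#include <asm/special_insns.h>
#include <asm/processor-flags.h>

static inline unsigned long open_kernel_writes(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* let ring 0 write through RO PTEs */
	return cr0;
}

static inline void close_kernel_writes(unsigned long cr0)
{
	write_cr0(cr0);			/* restore CR0.WP */
	preempt_enable();
}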
34334diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34335index 5d04be5..2beeaa2 100644
34336--- a/arch/x86/oprofile/backtrace.c
34337+++ b/arch/x86/oprofile/backtrace.c
34338@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34339 struct stack_frame_ia32 *fp;
34340 unsigned long bytes;
34341
34342- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34343+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34344 if (bytes != 0)
34345 return NULL;
34346
34347- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34348+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34349
34350 oprofile_add_trace(bufhead[0].return_address);
34351
34352@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34353 struct stack_frame bufhead[2];
34354 unsigned long bytes;
34355
34356- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34357+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34358 if (bytes != 0)
34359 return NULL;
34360
34361@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34362 {
34363 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34364
34365- if (!user_mode_vm(regs)) {
34366+ if (!user_mode(regs)) {
34367 unsigned long stack = kernel_stack_pointer(regs);
34368 if (depth)
34369 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34370diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34371index 1d2e639..f6ef82a 100644
34372--- a/arch/x86/oprofile/nmi_int.c
34373+++ b/arch/x86/oprofile/nmi_int.c
34374@@ -23,6 +23,7 @@
34375 #include <asm/nmi.h>
34376 #include <asm/msr.h>
34377 #include <asm/apic.h>
34378+#include <asm/pgtable.h>
34379
34380 #include "op_counter.h"
34381 #include "op_x86_model.h"
34382@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34383 if (ret)
34384 return ret;
34385
34386- if (!model->num_virt_counters)
34387- model->num_virt_counters = model->num_counters;
34388+ if (!model->num_virt_counters) {
34389+ pax_open_kernel();
34390+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34391+ pax_close_kernel();
34392+ }
34393
34394 mux_init(ops);
34395
34396diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34397index 50d86c0..7985318 100644
34398--- a/arch/x86/oprofile/op_model_amd.c
34399+++ b/arch/x86/oprofile/op_model_amd.c
34400@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34401 num_counters = AMD64_NUM_COUNTERS;
34402 }
34403
34404- op_amd_spec.num_counters = num_counters;
34405- op_amd_spec.num_controls = num_counters;
34406- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34407+ pax_open_kernel();
34408+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34409+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34410+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34411+ pax_close_kernel();
34412
34413 return 0;
34414 }
34415diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34416index d90528e..0127e2b 100644
34417--- a/arch/x86/oprofile/op_model_ppro.c
34418+++ b/arch/x86/oprofile/op_model_ppro.c
34419@@ -19,6 +19,7 @@
34420 #include <asm/msr.h>
34421 #include <asm/apic.h>
34422 #include <asm/nmi.h>
34423+#include <asm/pgtable.h>
34424
34425 #include "op_x86_model.h"
34426 #include "op_counter.h"
34427@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34428
34429 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34430
34431- op_arch_perfmon_spec.num_counters = num_counters;
34432- op_arch_perfmon_spec.num_controls = num_counters;
34433+ pax_open_kernel();
34434+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34435+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34436+ pax_close_kernel();
34437 }
34438
34439 static int arch_perfmon_init(struct oprofile_operations *ignore)
34440diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34441index 71e8a67..6a313bb 100644
34442--- a/arch/x86/oprofile/op_x86_model.h
34443+++ b/arch/x86/oprofile/op_x86_model.h
34444@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34445 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34446 struct op_msrs const * const msrs);
34447 #endif
34448-};
34449+} __do_const;
34450
34451 struct op_counter_config;
34452
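Marking struct op_x86_model_spec __do_const lets the constify plugin place every instance in read-only memory, which is why the op_model_amd.c and op_model_ppro.c hunks above write their counter fields through a cast inside an open-kernel window. The recurring pattern, shown in isolation:

/* Write-once update of a field in a constified (read-only) object:
 * cast away the implied const and bracket the store. */
pax_open_kernel();
*(unsigned int *)&op_amd_spec.num_counters = num_counters;
pax_close_kernel();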
34453diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34454index 44b9271..4c5a988 100644
34455--- a/arch/x86/pci/intel_mid_pci.c
34456+++ b/arch/x86/pci/intel_mid_pci.c
34457@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34458 pci_mmcfg_late_init();
34459 pcibios_enable_irq = intel_mid_pci_irq_enable;
34460 pcibios_disable_irq = intel_mid_pci_irq_disable;
34461- pci_root_ops = intel_mid_pci_ops;
34462+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34463 pci_soc_mode = 1;
34464 /* Continue with standard init */
34465 return 1;
34466diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34467index 5dc6ca5..25c03f5 100644
34468--- a/arch/x86/pci/irq.c
34469+++ b/arch/x86/pci/irq.c
34470@@ -51,7 +51,7 @@ struct irq_router {
34471 struct irq_router_handler {
34472 u16 vendor;
34473 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34474-};
34475+} __do_const;
34476
34477 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34478 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34479@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34480 return 0;
34481 }
34482
34483-static __initdata struct irq_router_handler pirq_routers[] = {
34484+static __initconst const struct irq_router_handler pirq_routers[] = {
34485 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34486 { PCI_VENDOR_ID_AL, ali_router_probe },
34487 { PCI_VENDOR_ID_ITE, ite_router_probe },
34488@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34489 static void __init pirq_find_router(struct irq_router *r)
34490 {
34491 struct irq_routing_table *rt = pirq_table;
34492- struct irq_router_handler *h;
34493+ const struct irq_router_handler *h;
34494
34495 #ifdef CONFIG_PCI_BIOS
34496 if (!rt->signature) {
34497@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34498 return 0;
34499 }
34500
34501-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34502+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34503 {
34504 .callback = fix_broken_hp_bios_irq9,
34505 .ident = "HP Pavilion N5400 Series Laptop",
34506diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34507index 9b83b90..4112152 100644
34508--- a/arch/x86/pci/pcbios.c
34509+++ b/arch/x86/pci/pcbios.c
34510@@ -79,7 +79,7 @@ union bios32 {
34511 static struct {
34512 unsigned long address;
34513 unsigned short segment;
34514-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34515+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34516
34517 /*
34518 * Returns the entry point for the given service, NULL on error
34519@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34520 unsigned long length; /* %ecx */
34521 unsigned long entry; /* %edx */
34522 unsigned long flags;
34523+ struct desc_struct d, *gdt;
34524
34525 local_irq_save(flags);
34526- __asm__("lcall *(%%edi); cld"
34527+
34528+ gdt = get_cpu_gdt_table(smp_processor_id());
34529+
34530+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34531+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34532+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34533+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34534+
34535+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34536 : "=a" (return_code),
34537 "=b" (address),
34538 "=c" (length),
34539 "=d" (entry)
34540 : "0" (service),
34541 "1" (0),
34542- "D" (&bios32_indirect));
34543+ "D" (&bios32_indirect),
34544+ "r"(__PCIBIOS_DS)
34545+ : "memory");
34546+
34547+ pax_open_kernel();
34548+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34549+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34550+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34551+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34552+ pax_close_kernel();
34553+
34554 local_irq_restore(flags);
34555
34556 switch (return_code) {
34557- case 0:
34558- return address + entry;
34559- case 0x80: /* Not present */
34560- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34561- return 0;
34562- default: /* Shouldn't happen */
34563- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34564- service, return_code);
34565+ case 0: {
34566+ int cpu;
34567+ unsigned char flags;
34568+
34569+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34570+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34571+ printk(KERN_WARNING "bios32_service: not valid\n");
34572 return 0;
34573+ }
34574+ address = address + PAGE_OFFSET;
34575+ length += 16UL; /* some BIOSs underreport this... */
34576+ flags = 4;
34577+ if (length >= 64*1024*1024) {
34578+ length >>= PAGE_SHIFT;
34579+ flags |= 8;
34580+ }
34581+
34582+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34583+ gdt = get_cpu_gdt_table(cpu);
34584+ pack_descriptor(&d, address, length, 0x9b, flags);
34585+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34586+ pack_descriptor(&d, address, length, 0x93, flags);
34587+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34588+ }
34589+ return entry;
34590+ }
34591+ case 0x80: /* Not present */
34592+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34593+ return 0;
34594+ default: /* Shouldn't happen */
34595+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34596+ service, return_code);
34597+ return 0;
34598 }
34599 }
34600
34601 static struct {
34602 unsigned long address;
34603 unsigned short segment;
34604-} pci_indirect = { 0, __KERNEL_CS };
34605+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34606
34607-static int pci_bios_present;
34608+static int pci_bios_present __read_only;
34609
34610 static int __init check_pcibios(void)
34611 {
34612@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34613 unsigned long flags, pcibios_entry;
34614
34615 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34616- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34617+ pci_indirect.address = pcibios_entry;
34618
34619 local_irq_save(flags);
34620- __asm__(
34621- "lcall *(%%edi); cld\n\t"
34622+ __asm__("movw %w6, %%ds\n\t"
34623+ "lcall *%%ss:(%%edi); cld\n\t"
34624+ "push %%ss\n\t"
34625+ "pop %%ds\n\t"
34626 "jc 1f\n\t"
34627 "xor %%ah, %%ah\n"
34628 "1:"
34629@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34630 "=b" (ebx),
34631 "=c" (ecx)
34632 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34633- "D" (&pci_indirect)
34634+ "D" (&pci_indirect),
34635+ "r" (__PCIBIOS_DS)
34636 : "memory");
34637 local_irq_restore(flags);
34638
34639@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34640
34641 switch (len) {
34642 case 1:
34643- __asm__("lcall *(%%esi); cld\n\t"
34644+ __asm__("movw %w6, %%ds\n\t"
34645+ "lcall *%%ss:(%%esi); cld\n\t"
34646+ "push %%ss\n\t"
34647+ "pop %%ds\n\t"
34648 "jc 1f\n\t"
34649 "xor %%ah, %%ah\n"
34650 "1:"
34651@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34652 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34653 "b" (bx),
34654 "D" ((long)reg),
34655- "S" (&pci_indirect));
34656+ "S" (&pci_indirect),
34657+ "r" (__PCIBIOS_DS));
34658 /*
34659 * Zero-extend the result beyond 8 bits, do not trust the
34660 * BIOS having done it:
34661@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34662 *value &= 0xff;
34663 break;
34664 case 2:
34665- __asm__("lcall *(%%esi); cld\n\t"
34666+ __asm__("movw %w6, %%ds\n\t"
34667+ "lcall *%%ss:(%%esi); cld\n\t"
34668+ "push %%ss\n\t"
34669+ "pop %%ds\n\t"
34670 "jc 1f\n\t"
34671 "xor %%ah, %%ah\n"
34672 "1:"
34673@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34674 : "1" (PCIBIOS_READ_CONFIG_WORD),
34675 "b" (bx),
34676 "D" ((long)reg),
34677- "S" (&pci_indirect));
34678+ "S" (&pci_indirect),
34679+ "r" (__PCIBIOS_DS));
34680 /*
34681 * Zero-extend the result beyond 16 bits, do not trust the
34682 * BIOS having done it:
34683@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34684 *value &= 0xffff;
34685 break;
34686 case 4:
34687- __asm__("lcall *(%%esi); cld\n\t"
34688+ __asm__("movw %w6, %%ds\n\t"
34689+ "lcall *%%ss:(%%esi); cld\n\t"
34690+ "push %%ss\n\t"
34691+ "pop %%ds\n\t"
34692 "jc 1f\n\t"
34693 "xor %%ah, %%ah\n"
34694 "1:"
34695@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34696 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34697 "b" (bx),
34698 "D" ((long)reg),
34699- "S" (&pci_indirect));
34700+ "S" (&pci_indirect),
34701+ "r" (__PCIBIOS_DS));
34702 break;
34703 }
34704
34705@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34706
34707 switch (len) {
34708 case 1:
34709- __asm__("lcall *(%%esi); cld\n\t"
34710+ __asm__("movw %w6, %%ds\n\t"
34711+ "lcall *%%ss:(%%esi); cld\n\t"
34712+ "push %%ss\n\t"
34713+ "pop %%ds\n\t"
34714 "jc 1f\n\t"
34715 "xor %%ah, %%ah\n"
34716 "1:"
34717@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34718 "c" (value),
34719 "b" (bx),
34720 "D" ((long)reg),
34721- "S" (&pci_indirect));
34722+ "S" (&pci_indirect),
34723+ "r" (__PCIBIOS_DS));
34724 break;
34725 case 2:
34726- __asm__("lcall *(%%esi); cld\n\t"
34727+ __asm__("movw %w6, %%ds\n\t"
34728+ "lcall *%%ss:(%%esi); cld\n\t"
34729+ "push %%ss\n\t"
34730+ "pop %%ds\n\t"
34731 "jc 1f\n\t"
34732 "xor %%ah, %%ah\n"
34733 "1:"
34734@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34735 "c" (value),
34736 "b" (bx),
34737 "D" ((long)reg),
34738- "S" (&pci_indirect));
34739+ "S" (&pci_indirect),
34740+ "r" (__PCIBIOS_DS));
34741 break;
34742 case 4:
34743- __asm__("lcall *(%%esi); cld\n\t"
34744+ __asm__("movw %w6, %%ds\n\t"
34745+ "lcall *%%ss:(%%esi); cld\n\t"
34746+ "push %%ss\n\t"
34747+ "pop %%ds\n\t"
34748 "jc 1f\n\t"
34749 "xor %%ah, %%ah\n"
34750 "1:"
34751@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34752 "c" (value),
34753 "b" (bx),
34754 "D" ((long)reg),
34755- "S" (&pci_indirect));
34756+ "S" (&pci_indirect),
34757+ "r" (__PCIBIOS_DS));
34758 break;
34759 }
34760
34761@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34762
34763 DBG("PCI: Fetching IRQ routing table... ");
34764 __asm__("push %%es\n\t"
34765+ "movw %w8, %%ds\n\t"
34766 "push %%ds\n\t"
34767 "pop %%es\n\t"
34768- "lcall *(%%esi); cld\n\t"
34769+ "lcall *%%ss:(%%esi); cld\n\t"
34770 "pop %%es\n\t"
34771+ "push %%ss\n\t"
34772+ "pop %%ds\n"
34773 "jc 1f\n\t"
34774 "xor %%ah, %%ah\n"
34775 "1:"
34776@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34777 "1" (0),
34778 "D" ((long) &opt),
34779 "S" (&pci_indirect),
34780- "m" (opt)
34781+ "m" (opt),
34782+ "r" (__PCIBIOS_DS)
34783 : "memory");
34784 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34785 if (ret & 0xff00)
34786@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34787 {
34788 int ret;
34789
34790- __asm__("lcall *(%%esi); cld\n\t"
34791+ __asm__("movw %w5, %%ds\n\t"
34792+ "lcall *%%ss:(%%esi); cld\n\t"
34793+ "push %%ss\n\t"
34794+ "pop %%ds\n"
34795 "jc 1f\n\t"
34796 "xor %%ah, %%ah\n"
34797 "1:"
34798@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34799 : "0" (PCIBIOS_SET_PCI_HW_INT),
34800 "b" ((dev->bus->number << 8) | dev->devfn),
34801 "c" ((irq << 8) | (pin + 10)),
34802- "S" (&pci_indirect));
34803+ "S" (&pci_indirect),
34804+ "r" (__PCIBIOS_DS));
34805 return !(ret & 0xff00);
34806 }
34807 EXPORT_SYMBOL(pcibios_set_irq_routing);
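Under KERNEXEC the kernel's own segments are no longer flat, so letting the PCI BIOS run with __KERNEL_CS and the kernel %ds would fault; the pcbios.c hunks instead build dedicated flat __PCIBIOS_CS/__PCIBIOS_DS descriptors and load %ds around every lcall. The descriptor setup, distilled from the bios32_service() hunk above:

struct desc_struct d;
struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());

/* transient ring-0 segments for the BIOS call window:
 * 0x9B = present code, 0x93 = present data, 0xC = 32-bit, 4K gran */
pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);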
34808diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34809index 40e7cda..c7e6672 100644
34810--- a/arch/x86/platform/efi/efi_32.c
34811+++ b/arch/x86/platform/efi/efi_32.c
34812@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34813 {
34814 struct desc_ptr gdt_descr;
34815
34816+#ifdef CONFIG_PAX_KERNEXEC
34817+ struct desc_struct d;
34818+#endif
34819+
34820 local_irq_save(efi_rt_eflags);
34821
34822 load_cr3(initial_page_table);
34823 __flush_tlb_all();
34824
34825+#ifdef CONFIG_PAX_KERNEXEC
34826+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34827+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34828+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34829+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34830+#endif
34831+
34832 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34833 gdt_descr.size = GDT_SIZE - 1;
34834 load_gdt(&gdt_descr);
34835@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34836 {
34837 struct desc_ptr gdt_descr;
34838
34839+#ifdef CONFIG_PAX_KERNEXEC
34840+ struct desc_struct d;
34841+
34842+ memset(&d, 0, sizeof d);
34843+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34844+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34845+#endif
34846+
34847 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34848 gdt_descr.size = GDT_SIZE - 1;
34849 load_gdt(&gdt_descr);
34850
34851+#ifdef CONFIG_PAX_PER_CPU_PGD
34852+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34853+#else
34854 load_cr3(swapper_pg_dir);
34855+#endif
34856+
34857 __flush_tlb_all();
34858
34859 local_irq_restore(efi_rt_eflags);
34860diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34861index 17e80d8..9fa6e41 100644
34862--- a/arch/x86/platform/efi/efi_64.c
34863+++ b/arch/x86/platform/efi/efi_64.c
34864@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34865 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34866 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34867 }
34868+
34869+#ifdef CONFIG_PAX_PER_CPU_PGD
34870+ load_cr3(swapper_pg_dir);
34871+#endif
34872+
34873 __flush_tlb_all();
34874 }
34875
34876@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34877 for (pgd = 0; pgd < n_pgds; pgd++)
34878 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34879 kfree(save_pgd);
34880+
34881+#ifdef CONFIG_PAX_PER_CPU_PGD
34882+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34883+#endif
34884+
34885 __flush_tlb_all();
34886 local_irq_restore(efi_flags);
34887 early_code_mapping_set_exec(0);
34888@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34889 unsigned npages;
34890 pgd_t *pgd;
34891
34892- if (efi_enabled(EFI_OLD_MEMMAP))
34893+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34894+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34895+ * able to execute the EFI services.
34896+ */
34897+ if (__supported_pte_mask & _PAGE_NX) {
34898+ unsigned long addr = (unsigned long) __va(0);
34899+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34900+
34901+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34902+#ifdef CONFIG_PAX_PER_CPU_PGD
34903+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34904+#endif
34905+ set_pgd(pgd_offset_k(addr), pe);
34906+ }
34907+
34908 return 0;
34909+ }
34910
34911 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34912 pgd = __va(efi_scratch.efi_pgt);
34913diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34914index 040192b..7d3300f 100644
34915--- a/arch/x86/platform/efi/efi_stub_32.S
34916+++ b/arch/x86/platform/efi/efi_stub_32.S
34917@@ -6,7 +6,9 @@
34918 */
34919
34920 #include <linux/linkage.h>
34921+#include <linux/init.h>
34922 #include <asm/page_types.h>
34923+#include <asm/segment.h>
34924
34925 /*
34926 * efi_call_phys(void *, ...) is a function with variable parameters.
34927@@ -20,7 +22,7 @@
34928 * service functions will comply with gcc calling convention, too.
34929 */
34930
34931-.text
34932+__INIT
34933 ENTRY(efi_call_phys)
34934 /*
34935 * 0. The function can only be called in Linux kernel. So CS has been
34936@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34937 * The mapping of lower virtual memory has been created in prolog and
34938 * epilog.
34939 */
34940- movl $1f, %edx
34941- subl $__PAGE_OFFSET, %edx
34942- jmp *%edx
34943+#ifdef CONFIG_PAX_KERNEXEC
34944+ movl $(__KERNEXEC_EFI_DS), %edx
34945+ mov %edx, %ds
34946+ mov %edx, %es
34947+ mov %edx, %ss
34948+ addl $2f,(1f)
34949+ ljmp *(1f)
34950+
34951+__INITDATA
34952+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34953+.previous
34954+
34955+2:
34956+ subl $2b,(1b)
34957+#else
34958+ jmp 1f-__PAGE_OFFSET
34959 1:
34960+#endif
34961
34962 /*
34963 * 2. Now on the top of stack is the return
34964@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34965 * parameter 2, ..., param n. To make things easy, we save the return
34966 * address of efi_call_phys in a global variable.
34967 */
34968- popl %edx
34969- movl %edx, saved_return_addr
34970- /* get the function pointer into ECX*/
34971- popl %ecx
34972- movl %ecx, efi_rt_function_ptr
34973- movl $2f, %edx
34974- subl $__PAGE_OFFSET, %edx
34975- pushl %edx
34976+ popl (saved_return_addr)
34977+ popl (efi_rt_function_ptr)
34978
34979 /*
34980 * 3. Clear PG bit in %CR0.
34981@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34982 /*
34983 * 5. Call the physical function.
34984 */
34985- jmp *%ecx
34986+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34987
34988-2:
34989 /*
34990 * 6. After EFI runtime service returns, control will return to
34991 * following instruction. We'd better readjust stack pointer first.
34992@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34993 movl %cr0, %edx
34994 orl $0x80000000, %edx
34995 movl %edx, %cr0
34996- jmp 1f
34997-1:
34998+
34999 /*
35000 * 8. Now restore the virtual mode from flat mode by
35001 * adding EIP with PAGE_OFFSET.
35002 */
35003- movl $1f, %edx
35004- jmp *%edx
35005+#ifdef CONFIG_PAX_KERNEXEC
35006+ movl $(__KERNEL_DS), %edx
35007+ mov %edx, %ds
35008+ mov %edx, %es
35009+ mov %edx, %ss
35010+ ljmp $(__KERNEL_CS),$1f
35011+#else
35012+ jmp 1f+__PAGE_OFFSET
35013+#endif
35014 1:
35015
35016 /*
35017 * 9. Balance the stack. And because EAX contain the return value,
35018 * we'd better not clobber it.
35019 */
35020- leal efi_rt_function_ptr, %edx
35021- movl (%edx), %ecx
35022- pushl %ecx
35023+ pushl (efi_rt_function_ptr)
35024
35025 /*
35026- * 10. Push the saved return address onto the stack and return.
35027+ * 10. Return to the saved return address.
35028 */
35029- leal saved_return_addr, %edx
35030- movl (%edx), %ecx
35031- pushl %ecx
35032- ret
35033+ jmpl *(saved_return_addr)
35034 ENDPROC(efi_call_phys)
35035 .previous
35036
35037-.data
35038+__INITDATA
35039 saved_return_addr:
35040 .long 0
35041 efi_rt_function_ptr:
35042diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35043index 86d0f9e..6d499f4 100644
35044--- a/arch/x86/platform/efi/efi_stub_64.S
35045+++ b/arch/x86/platform/efi/efi_stub_64.S
35046@@ -11,6 +11,7 @@
35047 #include <asm/msr.h>
35048 #include <asm/processor-flags.h>
35049 #include <asm/page_types.h>
35050+#include <asm/alternative-asm.h>
35051
35052 #define SAVE_XMM \
35053 mov %rsp, %rax; \
35054@@ -88,6 +89,7 @@ ENTRY(efi_call)
35055 RESTORE_PGT
35056 addq $48, %rsp
35057 RESTORE_XMM
35058+ pax_force_retaddr 0, 1
35059 ret
35060 ENDPROC(efi_call)
35061
35062diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35063index 1bbedc4..eb795b5 100644
35064--- a/arch/x86/platform/intel-mid/intel-mid.c
35065+++ b/arch/x86/platform/intel-mid/intel-mid.c
35066@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35067 {
35068 };
35069
35070-static void intel_mid_reboot(void)
35071+static void __noreturn intel_mid_reboot(void)
35072 {
35073 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35074+ BUG();
35075 }
35076
35077 static unsigned long __init intel_mid_calibrate_tsc(void)
35078diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35079index 3c1c386..59a68ed 100644
35080--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35081+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35082@@ -13,6 +13,6 @@
35083 /* For every CPU addition a new get_<cpuname>_ops interface needs
35084 * to be added.
35085 */
35086-extern void *get_penwell_ops(void);
35087-extern void *get_cloverview_ops(void);
35088-extern void *get_tangier_ops(void);
35089+extern const void *get_penwell_ops(void);
35090+extern const void *get_cloverview_ops(void);
35091+extern const void *get_tangier_ops(void);
35092diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35093index 23381d2..8ddc10e 100644
35094--- a/arch/x86/platform/intel-mid/mfld.c
35095+++ b/arch/x86/platform/intel-mid/mfld.c
35096@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35097 pm_power_off = mfld_power_off;
35098 }
35099
35100-void *get_penwell_ops(void)
35101+const void *get_penwell_ops(void)
35102 {
35103 return &penwell_ops;
35104 }
35105
35106-void *get_cloverview_ops(void)
35107+const void *get_cloverview_ops(void)
35108 {
35109 return &penwell_ops;
35110 }
35111diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35112index aaca917..66eadbc 100644
35113--- a/arch/x86/platform/intel-mid/mrfl.c
35114+++ b/arch/x86/platform/intel-mid/mrfl.c
35115@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35116 .arch_setup = tangier_arch_setup,
35117 };
35118
35119-void *get_tangier_ops(void)
35120+const void *get_tangier_ops(void)
35121 {
35122 return &tangier_ops;
35123 }
35124diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35125index d6ee929..3637cb5 100644
35126--- a/arch/x86/platform/olpc/olpc_dt.c
35127+++ b/arch/x86/platform/olpc/olpc_dt.c
35128@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35129 return res;
35130 }
35131
35132-static struct of_pdt_ops prom_olpc_ops __initdata = {
35133+static struct of_pdt_ops prom_olpc_ops __initconst = {
35134 .nextprop = olpc_dt_nextprop,
35135 .getproplen = olpc_dt_getproplen,
35136 .getproperty = olpc_dt_getproperty,
35137diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35138index 6ec7910..ecdbb11 100644
35139--- a/arch/x86/power/cpu.c
35140+++ b/arch/x86/power/cpu.c
35141@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35142 static void fix_processor_context(void)
35143 {
35144 int cpu = smp_processor_id();
35145- struct tss_struct *t = &per_cpu(init_tss, cpu);
35146-#ifdef CONFIG_X86_64
35147- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35148- tss_desc tss;
35149-#endif
35150+ struct tss_struct *t = init_tss + cpu;
35151+
35152 set_tss_desc(cpu, t); /*
35153 * This just modifies memory; should not be
35154 * necessary. But... This is necessary, because
35155@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35156 */
35157
35158 #ifdef CONFIG_X86_64
35159- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35160- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35161- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35162-
35163 syscall_init(); /* This sets MSR_*STAR and related */
35164 #endif
35165 load_TR_desc(); /* This does ltr */
35166diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35167index bad628a..a102610 100644
35168--- a/arch/x86/realmode/init.c
35169+++ b/arch/x86/realmode/init.c
35170@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35171 __va(real_mode_header->trampoline_header);
35172
35173 #ifdef CONFIG_X86_32
35174- trampoline_header->start = __pa_symbol(startup_32_smp);
35175+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35176+
35177+#ifdef CONFIG_PAX_KERNEXEC
35178+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35179+#endif
35180+
35181+ trampoline_header->boot_cs = __BOOT_CS;
35182 trampoline_header->gdt_limit = __BOOT_DS + 7;
35183 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35184 #else
35185@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35186 *trampoline_cr4_features = read_cr4();
35187
35188 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35189- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35190+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35191 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35192 #endif
35193 }
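Two things change in setup_real_mode(): on 32-bit the trampoline entry is translated with ktla_ktva() and a real __BOOT_CS selector is recorded (consumed by the ljmpl *(trampoline_header) in trampoline_32.S below), and on 64-bit the copied low mapping strips _PAGE_NX, plausibly because the AP briefly executes out of that identity-mapped range before adopting the kernel page tables. The NX strip in isolation:

/* keep the trampoline's copy of the low mapping executable */
trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;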
35194diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35195index 7c0d7be..d24dc88 100644
35196--- a/arch/x86/realmode/rm/Makefile
35197+++ b/arch/x86/realmode/rm/Makefile
35198@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35199
35200 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35201 -I$(srctree)/arch/x86/boot
35202+ifdef CONSTIFY_PLUGIN
35203+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35204+endif
35205 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35206 GCOV_PROFILE := n
35207diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35208index a28221d..93c40f1 100644
35209--- a/arch/x86/realmode/rm/header.S
35210+++ b/arch/x86/realmode/rm/header.S
35211@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35212 #endif
35213 /* APM/BIOS reboot */
35214 .long pa_machine_real_restart_asm
35215-#ifdef CONFIG_X86_64
35216+#ifdef CONFIG_X86_32
35217+ .long __KERNEL_CS
35218+#else
35219 .long __KERNEL32_CS
35220 #endif
35221 END(real_mode_header)
35222diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35223index 48ddd76..c26749f 100644
35224--- a/arch/x86/realmode/rm/trampoline_32.S
35225+++ b/arch/x86/realmode/rm/trampoline_32.S
35226@@ -24,6 +24,12 @@
35227 #include <asm/page_types.h>
35228 #include "realmode.h"
35229
35230+#ifdef CONFIG_PAX_KERNEXEC
35231+#define ta(X) (X)
35232+#else
35233+#define ta(X) (pa_ ## X)
35234+#endif
35235+
35236 .text
35237 .code16
35238
35239@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35240
35241 cli # We should be safe anyway
35242
35243- movl tr_start, %eax # where we need to go
35244-
35245 movl $0xA5A5A5A5, trampoline_status
35246 # write marker for master knows we're running
35247
35248@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35249 movw $1, %dx # protected mode (PE) bit
35250 lmsw %dx # into protected mode
35251
35252- ljmpl $__BOOT_CS, $pa_startup_32
35253+ ljmpl *(trampoline_header)
35254
35255 .section ".text32","ax"
35256 .code32
35257@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35258 .balign 8
35259 GLOBAL(trampoline_header)
35260 tr_start: .space 4
35261- tr_gdt_pad: .space 2
35262+ tr_boot_cs: .space 2
35263 tr_gdt: .space 6
35264 END(trampoline_header)
35265
35266diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35267index dac7b20..72dbaca 100644
35268--- a/arch/x86/realmode/rm/trampoline_64.S
35269+++ b/arch/x86/realmode/rm/trampoline_64.S
35270@@ -93,6 +93,7 @@ ENTRY(startup_32)
35271 movl %edx, %gs
35272
35273 movl pa_tr_cr4, %eax
35274+ andl $~X86_CR4_PCIDE, %eax
35275 movl %eax, %cr4 # Enable PAE mode
35276
35277 # Setup trampoline 4 level pagetables
35278@@ -106,7 +107,7 @@ ENTRY(startup_32)
35279 wrmsr
35280
35281 # Enable paging and in turn activate Long Mode
35282- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35283+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35284 movl %eax, %cr0
35285
35286 /*
35287diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35288index 9e7e147..25a4158 100644
35289--- a/arch/x86/realmode/rm/wakeup_asm.S
35290+++ b/arch/x86/realmode/rm/wakeup_asm.S
35291@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35292 lgdtl pmode_gdt
35293
35294 /* This really couldn't... */
35295- movl pmode_entry, %eax
35296 movl pmode_cr0, %ecx
35297 movl %ecx, %cr0
35298- ljmpl $__KERNEL_CS, $pa_startup_32
35299- /* -> jmp *%eax in trampoline_32.S */
35300+
35301+ ljmpl *pmode_entry
35302 #else
35303 jmp trampoline_start
35304 #endif
35305diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35306index 604a37e..e49702a 100644
35307--- a/arch/x86/tools/Makefile
35308+++ b/arch/x86/tools/Makefile
35309@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35310
35311 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35312
35313-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35314+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35315 hostprogs-y += relocs
35316 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35317 PHONY += relocs
35318diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35319index 0c2fae8..88036b7 100644
35320--- a/arch/x86/tools/relocs.c
35321+++ b/arch/x86/tools/relocs.c
35322@@ -1,5 +1,7 @@
35323 /* This is included from relocs_32/64.c */
35324
35325+#include "../../../include/generated/autoconf.h"
35326+
35327 #define ElfW(type) _ElfW(ELF_BITS, type)
35328 #define _ElfW(bits, type) __ElfW(bits, type)
35329 #define __ElfW(bits, type) Elf##bits##_##type
35330@@ -11,6 +13,7 @@
35331 #define Elf_Sym ElfW(Sym)
35332
35333 static Elf_Ehdr ehdr;
35334+static Elf_Phdr *phdr;
35335
35336 struct relocs {
35337 uint32_t *offset;
35338@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35339 }
35340 }
35341
35342+static void read_phdrs(FILE *fp)
35343+{
35344+ unsigned int i;
35345+
35346+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35347+ if (!phdr) {
35348+ die("Unable to allocate %d program headers\n",
35349+ ehdr.e_phnum);
35350+ }
35351+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35352+ die("Seek to %d failed: %s\n",
35353+ ehdr.e_phoff, strerror(errno));
35354+ }
35355+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35356+ die("Cannot read ELF program headers: %s\n",
35357+ strerror(errno));
35358+ }
35359+ for(i = 0; i < ehdr.e_phnum; i++) {
35360+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35361+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35362+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35363+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35364+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35365+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35366+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35367+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35368+ }
35369+
35370+}
35371+
35372 static void read_shdrs(FILE *fp)
35373 {
35374- int i;
35375+ unsigned int i;
35376 Elf_Shdr shdr;
35377
35378 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35379@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35380
35381 static void read_strtabs(FILE *fp)
35382 {
35383- int i;
35384+ unsigned int i;
35385 for (i = 0; i < ehdr.e_shnum; i++) {
35386 struct section *sec = &secs[i];
35387 if (sec->shdr.sh_type != SHT_STRTAB) {
35388@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35389
35390 static void read_symtabs(FILE *fp)
35391 {
35392- int i,j;
35393+ unsigned int i,j;
35394 for (i = 0; i < ehdr.e_shnum; i++) {
35395 struct section *sec = &secs[i];
35396 if (sec->shdr.sh_type != SHT_SYMTAB) {
35397@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35398 }
35399
35400
35401-static void read_relocs(FILE *fp)
35402+static void read_relocs(FILE *fp, int use_real_mode)
35403 {
35404- int i,j;
35405+ unsigned int i,j;
35406+ uint32_t base;
35407+
35408 for (i = 0; i < ehdr.e_shnum; i++) {
35409 struct section *sec = &secs[i];
35410 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35411@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35412 die("Cannot read symbol table: %s\n",
35413 strerror(errno));
35414 }
35415+ base = 0;
35416+
35417+#ifdef CONFIG_X86_32
35418+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35419+ if (phdr[j].p_type != PT_LOAD )
35420+ continue;
35421+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35422+ continue;
35423+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35424+ break;
35425+ }
35426+#endif
35427+
35428 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35429 Elf_Rel *rel = &sec->reltab[j];
35430- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35431+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35432 rel->r_info = elf_xword_to_cpu(rel->r_info);
35433 #if (SHT_REL_TYPE == SHT_RELA)
35434 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35435@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35436
35437 static void print_absolute_symbols(void)
35438 {
35439- int i;
35440+ unsigned int i;
35441 const char *format;
35442
35443 if (ELF_BITS == 64)
35444@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35445 for (i = 0; i < ehdr.e_shnum; i++) {
35446 struct section *sec = &secs[i];
35447 char *sym_strtab;
35448- int j;
35449+ unsigned int j;
35450
35451 if (sec->shdr.sh_type != SHT_SYMTAB) {
35452 continue;
35453@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35454
35455 static void print_absolute_relocs(void)
35456 {
35457- int i, printed = 0;
35458+ unsigned int i, printed = 0;
35459 const char *format;
35460
35461 if (ELF_BITS == 64)
35462@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35463 struct section *sec_applies, *sec_symtab;
35464 char *sym_strtab;
35465 Elf_Sym *sh_symtab;
35466- int j;
35467+ unsigned int j;
35468 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35469 continue;
35470 }
35471@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35472 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35473 Elf_Sym *sym, const char *symname))
35474 {
35475- int i;
35476+ unsigned int i;
35477 /* Walk through the relocations */
35478 for (i = 0; i < ehdr.e_shnum; i++) {
35479 char *sym_strtab;
35480 Elf_Sym *sh_symtab;
35481 struct section *sec_applies, *sec_symtab;
35482- int j;
35483+ unsigned int j;
35484 struct section *sec = &secs[i];
35485
35486 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35487@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35488 {
35489 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35490 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35491+ char *sym_strtab = sec->link->link->strtab;
35492+
35493+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35494+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35495+ return 0;
35496+
35497+#ifdef CONFIG_PAX_KERNEXEC
35498+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35499+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35500+ return 0;
35501+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35502+ return 0;
35503+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35504+ return 0;
35505+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35506+ return 0;
35507+#endif
35508
35509 switch (r_type) {
35510 case R_386_NONE:
35511@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35512
35513 static void emit_relocs(int as_text, int use_real_mode)
35514 {
35515- int i;
35516+ unsigned int i;
35517 int (*write_reloc)(uint32_t, FILE *) = write32;
35518 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35519 const char *symname);
35520@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35521 {
35522 regex_init(use_real_mode);
35523 read_ehdr(fp);
35524+ read_phdrs(fp);
35525 read_shdrs(fp);
35526 read_strtabs(fp);
35527 read_symtabs(fp);
35528- read_relocs(fp);
35529+ read_relocs(fp, use_real_mode);
35530 if (ELF_BITS == 64)
35531 percpu_init();
35532 if (show_absolute_syms) {
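
[Annotation] On CONFIG_X86_32 the relocs.c change above biases each relocation offset by the delta between where the enclosing PT_LOAD segment is loaded physically and where it is linked virtually, rebased into the kernel mapping via CONFIG_PAGE_OFFSET; real-mode images (use_real_mode) are exempt. A minimal userspace sketch of that base computation; the struct, the PAGE_OFFSET value, and the sample numbers are illustrative, not taken from the tool:

    #include <stdint.h>
    #include <stdio.h>

    struct phdr { uint32_t p_type, p_offset, p_vaddr, p_paddr, p_filesz; };
    #define PT_LOAD     1
    #define PAGE_OFFSET 0xc0000000u  /* assumed typical 32-bit CONFIG_PAGE_OFFSET */

    /* Value added to r_offset for a section at file offset sh_offset,
     * mirroring the loop added to read_relocs() above. */
    static uint32_t reloc_base(const struct phdr *ph, unsigned n, uint32_t sh_offset)
    {
        for (unsigned j = 0; j < n; j++) {
            if (ph[j].p_type != PT_LOAD)
                continue;
            if (sh_offset < ph[j].p_offset ||
                sh_offset >= ph[j].p_offset + ph[j].p_filesz)
                continue;
            /* physical-minus-virtual delta, rebased to the kernel mapping */
            return PAGE_OFFSET + ph[j].p_paddr - ph[j].p_vaddr;
        }
        return 0;   /* unmatched sections and real-mode images get no bias */
    }

    int main(void)
    {
        struct phdr ph[] = { { PT_LOAD, 0x1000, 0x01000000, 0x01000000, 0x100000 } };
        printf("base = %#lx\n", (unsigned long)reloc_base(ph, 1, 0x2000)); /* 0xc0000000 */
        return 0;
    }
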
35533diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35534index f40281e..92728c9 100644
35535--- a/arch/x86/um/mem_32.c
35536+++ b/arch/x86/um/mem_32.c
35537@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35538 gate_vma.vm_start = FIXADDR_USER_START;
35539 gate_vma.vm_end = FIXADDR_USER_END;
35540 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35541- gate_vma.vm_page_prot = __P101;
35542+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35543
35544 return 0;
35545 }
35546diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35547index 80ffa5b..a33bd15 100644
35548--- a/arch/x86/um/tls_32.c
35549+++ b/arch/x86/um/tls_32.c
35550@@ -260,7 +260,7 @@ out:
35551 if (unlikely(task == current &&
35552 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35553 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35554- "without flushed TLS.", current->pid);
35555+ "without flushed TLS.", task_pid_nr(current));
35556 }
35557
35558 return 0;
35559diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35560index 5a4affe..9e2d522 100644
35561--- a/arch/x86/vdso/Makefile
35562+++ b/arch/x86/vdso/Makefile
35563@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35564 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35565 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35566
35567-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35568+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35569 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35570 GCOV_PROFILE := n
35571
35572diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35573index 0224987..c7d65a5 100644
35574--- a/arch/x86/vdso/vdso2c.h
35575+++ b/arch/x86/vdso/vdso2c.h
35576@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35577 unsigned long load_size = -1; /* Work around bogus warning */
35578 unsigned long mapping_size;
35579 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35580- int i;
35581+ unsigned int i;
35582 unsigned long j;
35583 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35584 *alt_sec = NULL;
35585diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35586index e904c27..b9eaa03 100644
35587--- a/arch/x86/vdso/vdso32-setup.c
35588+++ b/arch/x86/vdso/vdso32-setup.c
35589@@ -14,6 +14,7 @@
35590 #include <asm/cpufeature.h>
35591 #include <asm/processor.h>
35592 #include <asm/vdso.h>
35593+#include <asm/mman.h>
35594
35595 #ifdef CONFIG_COMPAT_VDSO
35596 #define VDSO_DEFAULT 0
35597diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35598index 1c9f750..cfddb1a 100644
35599--- a/arch/x86/vdso/vma.c
35600+++ b/arch/x86/vdso/vma.c
35601@@ -19,10 +19,7 @@
35602 #include <asm/page.h>
35603 #include <asm/hpet.h>
35604 #include <asm/desc.h>
35605-
35606-#if defined(CONFIG_X86_64)
35607-unsigned int __read_mostly vdso64_enabled = 1;
35608-#endif
35609+#include <asm/mman.h>
35610
35611 void __init init_vdso_image(const struct vdso_image *image)
35612 {
35613@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35614 .pages = no_pages,
35615 };
35616
35617+#ifdef CONFIG_PAX_RANDMMAP
35618+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35619+ calculate_addr = false;
35620+#endif
35621+
35622 if (calculate_addr) {
35623 addr = vdso_addr(current->mm->start_stack,
35624 image->size - image->sym_vvar_start);
35625@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35626 down_write(&mm->mmap_sem);
35627
35628 addr = get_unmapped_area(NULL, addr,
35629- image->size - image->sym_vvar_start, 0, 0);
35630+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35631 if (IS_ERR_VALUE(addr)) {
35632 ret = addr;
35633 goto up_fail;
35634 }
35635
35636 text_start = addr - image->sym_vvar_start;
35637- current->mm->context.vdso = (void __user *)text_start;
35638+ mm->context.vdso = text_start;
35639
35640 /*
35641 * MAYWRITE to allow gdb to COW and set breakpoints
35642@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35643 hpet_address >> PAGE_SHIFT,
35644 PAGE_SIZE,
35645 pgprot_noncached(PAGE_READONLY));
35646-
35647- if (ret)
35648- goto up_fail;
35649 }
35650 #endif
35651
35652 up_fail:
35653 if (ret)
35654- current->mm->context.vdso = NULL;
35655+ current->mm->context.vdso = 0;
35656
35657 up_write(&mm->mmap_sem);
35658 return ret;
35659@@ -191,8 +190,8 @@ static int load_vdso32(void)
35660
35661 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35662 current_thread_info()->sysenter_return =
35663- current->mm->context.vdso +
35664- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35665+ (void __force_user *)(current->mm->context.vdso +
35666+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35667
35668 return 0;
35669 }
35670@@ -201,9 +200,6 @@ static int load_vdso32(void)
35671 #ifdef CONFIG_X86_64
35672 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35673 {
35674- if (!vdso64_enabled)
35675- return 0;
35676-
35677 return map_vdso(&vdso_image_64, true);
35678 }
35679
35680@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35681 int uses_interp)
35682 {
35683 #ifdef CONFIG_X86_X32_ABI
35684- if (test_thread_flag(TIF_X32)) {
35685- if (!vdso64_enabled)
35686- return 0;
35687-
35688+ if (test_thread_flag(TIF_X32))
35689 return map_vdso(&vdso_image_x32, true);
35690- }
35691 #endif
35692
35693 return load_vdso32();
35694@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35695 #endif
35696
35697 #ifdef CONFIG_X86_64
35698-static __init int vdso_setup(char *s)
35699-{
35700- vdso64_enabled = simple_strtoul(s, NULL, 0);
35701- return 0;
35702-}
35703-__setup("vdso=", vdso_setup);
35704-#endif
35705-
35706-#ifdef CONFIG_X86_64
35707 static void vgetcpu_cpu_init(void *arg)
35708 {
35709 int cpu = smp_processor_id();
35710diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35711index e88fda8..76ce7ce 100644
35712--- a/arch/x86/xen/Kconfig
35713+++ b/arch/x86/xen/Kconfig
35714@@ -9,6 +9,7 @@ config XEN
35715 select XEN_HAVE_PVMMU
35716 depends on X86_64 || (X86_32 && X86_PAE)
35717 depends on X86_TSC
35718+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35719 help
35720 This is the Linux Xen port. Enabling this will allow the
35721 kernel to boot in a paravirtualized environment under the
35722diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35723index 78a881b..9994bbb 100644
35724--- a/arch/x86/xen/enlighten.c
35725+++ b/arch/x86/xen/enlighten.c
35726@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35727
35728 struct shared_info xen_dummy_shared_info;
35729
35730-void *xen_initial_gdt;
35731-
35732 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35733 __read_mostly int xen_have_vector_callback;
35734 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35735@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35736 {
35737 unsigned long va = dtr->address;
35738 unsigned int size = dtr->size + 1;
35739- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35740- unsigned long frames[pages];
35741+ unsigned long frames[65536 / PAGE_SIZE];
35742 int f;
35743
35744 /*
35745@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35746 {
35747 unsigned long va = dtr->address;
35748 unsigned int size = dtr->size + 1;
35749- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35750- unsigned long frames[pages];
35751+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35752 int f;
35753
35754 /*
35755@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35756 * 8-byte entries, or 16 4k pages..
35757 */
35758
35759- BUG_ON(size > 65536);
35760+ BUG_ON(size > GDT_SIZE);
35761 BUG_ON(va & ~PAGE_MASK);
35762
35763 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35764@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35765 return 0;
35766 }
35767
35768-static void set_xen_basic_apic_ops(void)
35769+static void __init set_xen_basic_apic_ops(void)
35770 {
35771 apic->read = xen_apic_read;
35772 apic->write = xen_apic_write;
35773@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35774 #endif
35775 };
35776
35777-static void xen_reboot(int reason)
35778+static __noreturn void xen_reboot(int reason)
35779 {
35780 struct sched_shutdown r = { .reason = reason };
35781
35782- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35783- BUG();
35784+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35785+ BUG();
35786 }
35787
35788-static void xen_restart(char *msg)
35789+static __noreturn void xen_restart(char *msg)
35790 {
35791 xen_reboot(SHUTDOWN_reboot);
35792 }
35793
35794-static void xen_emergency_restart(void)
35795+static __noreturn void xen_emergency_restart(void)
35796 {
35797 xen_reboot(SHUTDOWN_reboot);
35798 }
35799
35800-static void xen_machine_halt(void)
35801+static __noreturn void xen_machine_halt(void)
35802 {
35803 xen_reboot(SHUTDOWN_poweroff);
35804 }
35805
35806-static void xen_machine_power_off(void)
35807+static __noreturn void xen_machine_power_off(void)
35808 {
35809 if (pm_power_off)
35810 pm_power_off();
35811@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35812 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35813 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35814
35815- setup_stack_canary_segment(0);
35816- switch_to_new_gdt(0);
35817+ setup_stack_canary_segment(cpu);
35818+#ifdef CONFIG_X86_64
35819+ load_percpu_segment(cpu);
35820+#endif
35821+ switch_to_new_gdt(cpu);
35822
35823 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35824 pv_cpu_ops.load_gdt = xen_load_gdt;
35825@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35826 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35827
35828 /* Work out if we support NX */
35829- x86_configure_nx();
35830+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35831+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35832+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35833+ unsigned l, h;
35834+
35835+ __supported_pte_mask |= _PAGE_NX;
35836+ rdmsr(MSR_EFER, l, h);
35837+ l |= EFER_NX;
35838+ wrmsr(MSR_EFER, l, h);
35839+ }
35840+#endif
35841
35842 /* Get mfn list */
35843 xen_build_dynamic_phys_to_machine();
35844@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35845
35846 machine_ops = xen_machine_ops;
35847
35848- /*
35849- * The only reliable way to retain the initial address of the
35850- * percpu gdt_page is to remember it here, so we can go and
35851- * mark it RW later, when the initial percpu area is freed.
35852- */
35853- xen_initial_gdt = &per_cpu(gdt_page, 0);
35854-
35855 xen_smp_init();
35856
35857 #ifdef CONFIG_ACPI_NUMA
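
[Annotation] The enlighten.c hunk above open-codes the NX probe instead of calling x86_configure_nx(): it checks that extended CPUID leaves exist, tests the NX bit (bit 20 of EDX in leaf 0x80000001, i.e. X86_FEATURE_NX & 31), then sets _PAGE_NX in __supported_pte_mask and EFER.NX via rdmsr/wrmsr. A userspace sketch of the detection half; the EFER write needs ring 0, so it is omitted, and __get_cpuid is the GCC/Clang builtin wrapper:

    #include <cpuid.h>   /* GCC/Clang wrapper for the cpuid instruction */
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* extended leaves present? (same 0xffff0000 mask test as the hunk) */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
            (eax & 0xffff0000) != 0x80000000) {
            puts("no extended CPUID leaves");
            return 1;
        }

        /* NX is bit 20 of EDX in leaf 0x80000001 */
        __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        printf("NX %ssupported\n", (edx & (1u << 20)) ? "" : "not ");
        return 0;
    }
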
35858diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35859index 5c1f9ac..0e15f5c 100644
35860--- a/arch/x86/xen/mmu.c
35861+++ b/arch/x86/xen/mmu.c
35862@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35863 return val;
35864 }
35865
35866-static pteval_t pte_pfn_to_mfn(pteval_t val)
35867+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35868 {
35869 if (val & _PAGE_PRESENT) {
35870 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35871@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35872 * L3_k[511] -> level2_fixmap_pgt */
35873 convert_pfn_mfn(level3_kernel_pgt);
35874
35875+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35876+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35877+ convert_pfn_mfn(level3_vmemmap_pgt);
35878 /* L3_k[511][506] -> level1_fixmap_pgt */
35879+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35880 convert_pfn_mfn(level2_fixmap_pgt);
35881 }
35882 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35883@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35884 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35885 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35886 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35887+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35888+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35889+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35890 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35891 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35892+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35893 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35894 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35895 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35896+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35897
35898 /* Pin down new L4 */
35899 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35900@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35901 pv_mmu_ops.set_pud = xen_set_pud;
35902 #if PAGETABLE_LEVELS == 4
35903 pv_mmu_ops.set_pgd = xen_set_pgd;
35904+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35905 #endif
35906
35907 /* This will work as long as patching hasn't happened yet
35908@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35909 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35910 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35911 .set_pgd = xen_set_pgd_hyper,
35912+ .set_pgd_batched = xen_set_pgd_hyper,
35913
35914 .alloc_pud = xen_alloc_pmd_init,
35915 .release_pud = xen_release_pmd_init,
35916diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35917index 4c071ae..00e7049 100644
35918--- a/arch/x86/xen/smp.c
35919+++ b/arch/x86/xen/smp.c
35920@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35921
35922 if (xen_pv_domain()) {
35923 if (!xen_feature(XENFEAT_writable_page_tables))
35924- /* We've switched to the "real" per-cpu gdt, so make
35925- * sure the old memory can be recycled. */
35926- make_lowmem_page_readwrite(xen_initial_gdt);
35927-
35928 #ifdef CONFIG_X86_32
35929 /*
35930 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35931 * expects __USER_DS
35932 */
35933- loadsegment(ds, __USER_DS);
35934- loadsegment(es, __USER_DS);
35935+ loadsegment(ds, __KERNEL_DS);
35936+ loadsegment(es, __KERNEL_DS);
35937 #endif
35938
35939 xen_filter_cpu_maps();
35940@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35941 #ifdef CONFIG_X86_32
35942 /* Note: PVH is not yet supported on x86_32. */
35943 ctxt->user_regs.fs = __KERNEL_PERCPU;
35944- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35945+ savesegment(gs, ctxt->user_regs.gs);
35946 #endif
35947 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35948
35949@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35950 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35951 ctxt->flags = VGCF_IN_KERNEL;
35952 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35953- ctxt->user_regs.ds = __USER_DS;
35954- ctxt->user_regs.es = __USER_DS;
35955+ ctxt->user_regs.ds = __KERNEL_DS;
35956+ ctxt->user_regs.es = __KERNEL_DS;
35957 ctxt->user_regs.ss = __KERNEL_DS;
35958
35959 xen_copy_trap_info(ctxt->trap_ctxt);
35960@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35961 int rc;
35962
35963 per_cpu(current_task, cpu) = idle;
35964+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35965 #ifdef CONFIG_X86_32
35966 irq_ctx_init(cpu);
35967 #else
35968 clear_tsk_thread_flag(idle, TIF_FORK);
35969 #endif
35970- per_cpu(kernel_stack, cpu) =
35971- (unsigned long)task_stack_page(idle) -
35972- KERNEL_STACK_OFFSET + THREAD_SIZE;
35973+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35974
35975 xen_setup_runstate_info(cpu);
35976 xen_setup_timer(cpu);
35977@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35978
35979 void __init xen_smp_init(void)
35980 {
35981- smp_ops = xen_smp_ops;
35982+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35983 xen_fill_possible_map();
35984 }
35985
35986diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35987index fd92a64..1f72641 100644
35988--- a/arch/x86/xen/xen-asm_32.S
35989+++ b/arch/x86/xen/xen-asm_32.S
35990@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35991 pushw %fs
35992 movl $(__KERNEL_PERCPU), %eax
35993 movl %eax, %fs
35994- movl %fs:xen_vcpu, %eax
35995+ mov PER_CPU_VAR(xen_vcpu), %eax
35996 POP_FS
35997 #else
35998 movl %ss:xen_vcpu, %eax
35999diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36000index 674b2225..f1f5dc1 100644
36001--- a/arch/x86/xen/xen-head.S
36002+++ b/arch/x86/xen/xen-head.S
36003@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36004 #ifdef CONFIG_X86_32
36005 mov %esi,xen_start_info
36006 mov $init_thread_union+THREAD_SIZE,%esp
36007+#ifdef CONFIG_SMP
36008+ movl $cpu_gdt_table,%edi
36009+ movl $__per_cpu_load,%eax
36010+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36011+ rorl $16,%eax
36012+ movb %al,__KERNEL_PERCPU + 4(%edi)
36013+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36014+ movl $__per_cpu_end - 1,%eax
36015+ subl $__per_cpu_start,%eax
36016+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36017+#endif
36018 #else
36019 mov %rsi,xen_start_info
36020 mov $init_thread_union+THREAD_SIZE,%rsp
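
[Annotation] The xen-head.S addition patches the __KERNEL_PERCPU GDT entry at boot on 32-bit SMP so %fs-based per-cpu accesses work under Xen: the segment base is set to __per_cpu_load and the limit to the per-cpu area size minus one, scattered across the descriptor's split fields. A small sketch of that byte layout with illustrative values; bytes 5 and 6 hold type/flags and limit[19:16] and are left untouched, as the assembly leaves them:

    #include <stdint.h>
    #include <stdio.h>

    /* Base/limit placement in an x86 segment descriptor, exactly as the asm
     * above patches the __KERNEL_PERCPU slot: limit[15:0] at bytes 0-1,
     * base[15:0] at bytes 2-3, base[23:16] at byte 4, base[31:24] at byte 7. */
    static void set_desc_base_limit(uint8_t desc[8], uint32_t base, uint32_t limit)
    {
        desc[0] = limit & 0xff;
        desc[1] = (limit >> 8) & 0xff;
        desc[2] = base & 0xff;
        desc[3] = (base >> 8) & 0xff;
        desc[4] = (base >> 16) & 0xff;
        desc[7] = (base >> 24) & 0xff;
    }

    int main(void)
    {
        uint8_t desc[8] = {0};
        /* stand-ins for __per_cpu_load and (__per_cpu_end - __per_cpu_start - 1) */
        set_desc_base_limit(desc, 0xc1a00000u, 0x3fffu);
        for (int i = 0; i < 8; i++)
            printf("%02x ", desc[i]);
        putchar('\n');
        return 0;
    }
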
36021diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36022index 5686bd9..0c8b6ee 100644
36023--- a/arch/x86/xen/xen-ops.h
36024+++ b/arch/x86/xen/xen-ops.h
36025@@ -10,8 +10,6 @@
36026 extern const char xen_hypervisor_callback[];
36027 extern const char xen_failsafe_callback[];
36028
36029-extern void *xen_initial_gdt;
36030-
36031 struct trap_info;
36032 void xen_copy_trap_info(struct trap_info *traps);
36033
36034diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36035index 525bd3d..ef888b1 100644
36036--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36037+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36038@@ -119,9 +119,9 @@
36039 ----------------------------------------------------------------------*/
36040
36041 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36042-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36043 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36044 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36045+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36046
36047 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36048 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36049diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36050index 2f33760..835e50a 100644
36051--- a/arch/xtensa/variants/fsf/include/variant/core.h
36052+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36053@@ -11,6 +11,7 @@
36054 #ifndef _XTENSA_CORE_H
36055 #define _XTENSA_CORE_H
36056
36057+#include <linux/const.h>
36058
36059 /****************************************************************************
36060 Parameters Useful for Any Code, USER or PRIVILEGED
36061@@ -112,9 +113,9 @@
36062 ----------------------------------------------------------------------*/
36063
36064 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36065-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36066 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36067 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36068+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36069
36070 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36071 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
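
[Annotation] Both xtensa hunks derive XCHAL_DCACHE_LINESIZE from XCHAL_DCACHE_LINEWIDTH instead of keeping two constants that could drift apart, and use _AC() so the UL suffix appears only in C, never in assembly. A minimal sketch of the mechanism, mirroring the definitions in <uapi/linux/const.h>:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X                  /* assemblers reject the UL suffix */
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)         /* paste the suffix for C */
    #endif

    #define DCACHE_LINEWIDTH 5                                /* log2(line size) */
    #define DCACHE_LINESIZE  (_AC(1, UL) << DCACHE_LINEWIDTH)

    _Static_assert(DCACHE_LINESIZE == 32, "size always matches the width");
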
36072diff --git a/block/bio.c b/block/bio.c
36073index 471d738..bd3da0d 100644
36074--- a/block/bio.c
36075+++ b/block/bio.c
36076@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36077 /*
36078 * Overflow, abort
36079 */
36080- if (end < start)
36081+ if (end < start || end - start > INT_MAX - nr_pages)
36082 return ERR_PTR(-EINVAL);
36083
36084 nr_pages += end - start;
36085@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36086 /*
36087 * Overflow, abort
36088 */
36089- if (end < start)
36090+ if (end < start || end - start > INT_MAX - nr_pages)
36091 return ERR_PTR(-EINVAL);
36092
36093 nr_pages += end - start;
36094@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36095 const int read = bio_data_dir(bio) == READ;
36096 struct bio_map_data *bmd = bio->bi_private;
36097 int i;
36098- char *p = bmd->sgvecs[0].iov_base;
36099+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36100
36101 bio_for_each_segment_all(bvec, bio, i) {
36102 char *addr = page_address(bvec->bv_page);
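
[Annotation] The two bio.c checks above guard the int page-count accumulator: besides the wrap test end < start, a segment is now rejected when end - start > INT_MAX - nr_pages, a rearrangement chosen so the check itself cannot overflow. A runnable sketch of the same test; the function and sample values are illustrative:

    #include <limits.h>
    #include <stdio.h>

    /* Reject a segment when adding its page span to the running total would
     * overflow 'int', mirroring the checks added to bio.c above. */
    static int add_segment_pages(int nr_pages, unsigned long start, unsigned long end)
    {
        if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
            return -1;                    /* caller turns this into -EINVAL */
        return nr_pages + (int)(end - start);
    }

    int main(void)
    {
        printf("%d\n", add_segment_pages(10, 0, 100));          /* 110 */
        printf("%d\n", add_segment_pages(INT_MAX - 5, 0, 100)); /* -1  */
        return 0;
    }
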
36103diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36104index 0736729..2ec3b48 100644
36105--- a/block/blk-iopoll.c
36106+++ b/block/blk-iopoll.c
36107@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36108 }
36109 EXPORT_SYMBOL(blk_iopoll_complete);
36110
36111-static void blk_iopoll_softirq(struct softirq_action *h)
36112+static __latent_entropy void blk_iopoll_softirq(void)
36113 {
36114 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36115 int rearm = 0, budget = blk_iopoll_budget;
36116diff --git a/block/blk-map.c b/block/blk-map.c
36117index f890d43..97b0482 100644
36118--- a/block/blk-map.c
36119+++ b/block/blk-map.c
36120@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36121 if (!len || !kbuf)
36122 return -EINVAL;
36123
36124- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36125+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36126 if (do_copy)
36127 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36128 else
36129diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36130index 53b1737..08177d2e 100644
36131--- a/block/blk-softirq.c
36132+++ b/block/blk-softirq.c
36133@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36134 * Softirq action handler - move entries to local list and loop over them
36135 * while passing them to the queue registered handler.
36136 */
36137-static void blk_done_softirq(struct softirq_action *h)
36138+static __latent_entropy void blk_done_softirq(void)
36139 {
36140 struct list_head *cpu_list, local_list;
36141
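
[Annotation] blk_iopoll_softirq() and blk_done_softirq() above gain the __latent_entropy annotation and drop their unused struct softirq_action * argument. With the PaX latent_entropy gcc plugin, annotated functions are instrumented so their execution stirs entropy into the pool; without the plugin the marker is empty. A sketch of how the marker is typically wired up; the LATENT_ENTROPY_PLUGIN guard is what the plugin itself defines:

    #include <stdio.h>

    #ifdef LATENT_ENTROPY_PLUGIN
    #define __latent_entropy __attribute__((latent_entropy))
    #else
    #define __latent_entropy            /* inert without the plugin */
    #endif

    /* The attribute changes nothing about the function's behaviour; the
     * plugin adds instrumentation that feeds the entropy pool as it runs. */
    static __latent_entropy void demo_softirq(void)
    {
        puts("handler body runs normally either way");
    }

    int main(void) { demo_softirq(); return 0; }
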
36142diff --git a/block/bsg.c b/block/bsg.c
36143index 276e869..6fe4c61 100644
36144--- a/block/bsg.c
36145+++ b/block/bsg.c
36146@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36147 struct sg_io_v4 *hdr, struct bsg_device *bd,
36148 fmode_t has_write_perm)
36149 {
36150+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36151+ unsigned char *cmdptr;
36152+
36153 if (hdr->request_len > BLK_MAX_CDB) {
36154 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36155 if (!rq->cmd)
36156 return -ENOMEM;
36157- }
36158+ cmdptr = rq->cmd;
36159+ } else
36160+ cmdptr = tmpcmd;
36161
36162- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36163+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36164 hdr->request_len))
36165 return -EFAULT;
36166
36167+ if (cmdptr != rq->cmd)
36168+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36169+
36170 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36171 if (blk_verify_command(rq->cmd, has_write_perm))
36172 return -EPERM;
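
[Annotation] blk_fill_sgv4_hdr_rq() above stages the user-supplied CDB in a stack temporary and copies it into rq->cmd only once copy_from_user() has fully succeeded, so a faulting copy can no longer leave a half-written command in place; oversized CDBs go straight into the freshly kzalloc'd buffer, which nothing else can see yet. A userspace sketch of the pattern, where copy_in() stands in for copy_from_user() and CMD_MAX for sizeof(rq->__cmd):

    #include <stdio.h>
    #include <string.h>

    #define CMD_MAX 32   /* stands in for sizeof(rq->__cmd) */

    /* Demo stand-in for copy_from_user(): returns bytes NOT copied (0 = ok). */
    static unsigned long copy_in(void *dst, const void *src, unsigned long n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* Stage the command in a stack temporary; commit to 'cmd' only after the
     * whole copy succeeded. */
    static int fill_cmd(unsigned char *cmd, const void *ubuf, unsigned long len)
    {
        unsigned char tmp[CMD_MAX];
        unsigned char *p = (len <= sizeof(tmp)) ? tmp : cmd;

        if (copy_in(p, ubuf, len))
            return -1;               /* fault: nothing was committed */
        if (p != cmd)
            memcpy(cmd, p, len);     /* single commit step */
        return 0;
    }

    int main(void)
    {
        unsigned char cmd[CMD_MAX] = {0};
        if (fill_cmd(cmd, "\x12\x00\x00\x00\x24\x00", 6) == 0)  /* INQUIRY CDB */
            printf("opcode %#x\n", cmd[0]);
        return 0;
    }
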
36173diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36174index f678c73..f35aa18 100644
36175--- a/block/compat_ioctl.c
36176+++ b/block/compat_ioctl.c
36177@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36178 cgc = compat_alloc_user_space(sizeof(*cgc));
36179 cgc32 = compat_ptr(arg);
36180
36181- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36182+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36183 get_user(data, &cgc32->buffer) ||
36184 put_user(compat_ptr(data), &cgc->buffer) ||
36185 copy_in_user(&cgc->buflen, &cgc32->buflen,
36186@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36187 err |= __get_user(f->spec1, &uf->spec1);
36188 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36189 err |= __get_user(name, &uf->name);
36190- f->name = compat_ptr(name);
36191+ f->name = (void __force_kernel *)compat_ptr(name);
36192 if (err) {
36193 err = -EFAULT;
36194 goto out;
36195diff --git a/block/genhd.c b/block/genhd.c
36196index 0a536dc..b8f7aca 100644
36197--- a/block/genhd.c
36198+++ b/block/genhd.c
36199@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36200
36201 /*
36202 * Register device numbers dev..(dev+range-1)
36203- * range must be nonzero
36204+ * Noop if @range is zero.
36205 * The hash chain is sorted on range, so that subranges can override.
36206 */
36207 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36208 struct kobject *(*probe)(dev_t, int *, void *),
36209 int (*lock)(dev_t, void *), void *data)
36210 {
36211- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36212+ if (range)
36213+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36214 }
36215
36216 EXPORT_SYMBOL(blk_register_region);
36217
36218+/* undo blk_register_region(), noop if @range is zero */
36219 void blk_unregister_region(dev_t devt, unsigned long range)
36220 {
36221- kobj_unmap(bdev_map, devt, range);
36222+ if (range)
36223+ kobj_unmap(bdev_map, devt, range);
36224 }
36225
36226 EXPORT_SYMBOL(blk_unregister_region);
36227diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36228index 56d08fd..2e07090 100644
36229--- a/block/partitions/efi.c
36230+++ b/block/partitions/efi.c
36231@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36232 if (!gpt)
36233 return NULL;
36234
36235+ if (!le32_to_cpu(gpt->num_partition_entries))
36236+ return NULL;
36237+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36238+ if (!pte)
36239+ return NULL;
36240+
36241 count = le32_to_cpu(gpt->num_partition_entries) *
36242 le32_to_cpu(gpt->sizeof_partition_entry);
36243- if (!count)
36244- return NULL;
36245- pte = kmalloc(count, GFP_KERNEL);
36246- if (!pte)
36247- return NULL;
36248-
36249 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36250 (u8 *) pte, count) < count) {
36251 kfree(pte);
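
[Annotation] alloc_read_gpt_entries() above switches from kmalloc(nents * entry_size) to kcalloc(nents, entry_size, ...): both multiplicands come from the on-disk GPT header, so a crafted pair can wrap the product and under-allocate the buffer that read_lba() then fills. kcalloc() performs the multiplication with an overflow check and returns NULL on wrap; userspace calloc() gives the same guarantee, as this sketch with illustrative sizes shows:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* sizes a crafted GPT header could claim */
        uint32_t nents = 0x20000001u, esz = 0x80u;

        /* naive 32-bit multiply wraps: a 128-byte buffer "holds" ~4 GiB */
        uint32_t wrapped = nents * esz;
        printf("wrapped size: %u bytes\n", (unsigned)wrapped);

        /* calloc(), like kcalloc() in the hunk, checks the multiplication
         * and refuses instead of under-allocating */
        void *p = calloc((size_t)SIZE_MAX / 2, 4);
        printf("calloc: %s\n", p ? "allocated" : "refused");
        free(p);
        return 0;
    }
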
36252diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36253index 28163fa..07190a06 100644
36254--- a/block/scsi_ioctl.c
36255+++ b/block/scsi_ioctl.c
36256@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36257 return put_user(0, p);
36258 }
36259
36260-static int sg_get_timeout(struct request_queue *q)
36261+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36262 {
36263 return jiffies_to_clock_t(q->sg_timeout);
36264 }
36265@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36266 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36267 struct sg_io_hdr *hdr, fmode_t mode)
36268 {
36269- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36270+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36271+ unsigned char *cmdptr;
36272+
36273+ if (rq->cmd != rq->__cmd)
36274+ cmdptr = rq->cmd;
36275+ else
36276+ cmdptr = tmpcmd;
36277+
36278+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36279 return -EFAULT;
36280+
36281+ if (cmdptr != rq->cmd)
36282+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36283+
36284 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36285 return -EPERM;
36286
36287@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36288 int err;
36289 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36290 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36291+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36292+ unsigned char *cmdptr;
36293
36294 if (!sic)
36295 return -EINVAL;
36296@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36297 */
36298 err = -EFAULT;
36299 rq->cmd_len = cmdlen;
36300- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36301+
36302+ if (rq->cmd != rq->__cmd)
36303+ cmdptr = rq->cmd;
36304+ else
36305+ cmdptr = tmpcmd;
36306+
36307+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36308 goto error;
36309
36310+ if (rq->cmd != cmdptr)
36311+ memcpy(rq->cmd, cmdptr, cmdlen);
36312+
36313 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36314 goto error;
36315
36316diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36317index 650afac1..f3307de 100644
36318--- a/crypto/cryptd.c
36319+++ b/crypto/cryptd.c
36320@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36321
36322 struct cryptd_blkcipher_request_ctx {
36323 crypto_completion_t complete;
36324-};
36325+} __no_const;
36326
36327 struct cryptd_hash_ctx {
36328 struct crypto_shash *child;
36329@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36330
36331 struct cryptd_aead_request_ctx {
36332 crypto_completion_t complete;
36333-};
36334+} __no_const;
36335
36336 static void cryptd_queue_worker(struct work_struct *work);
36337
36338diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36339index c305d41..a96de79 100644
36340--- a/crypto/pcrypt.c
36341+++ b/crypto/pcrypt.c
36342@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36343 int ret;
36344
36345 pinst->kobj.kset = pcrypt_kset;
36346- ret = kobject_add(&pinst->kobj, NULL, name);
36347+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36348 if (!ret)
36349 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36350
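
[Annotation] The pcrypt.c fix above is a classic format-string repair: kobject_add() takes a printf-style format for the object name, so passing the caller-supplied name directly lets any '%' in it act as a conversion; routing it through "%s" makes it pure data. The same bug and fix in miniature (gcc's -Wformat-security flags the unsafe form):

    #include <stdio.h>

    int main(void)
    {
        const char *name = "evil-%x-%n";  /* caller-influenced string */

        /* unsafe: 'name' becomes the format string; a %n in it would write
         * through whatever pointer happens to sit in the argument slot */
        /* printf(name); */

        printf("%s\n", name);             /* safe: 'name' is only data */
        return 0;
    }
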
36351diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36352index 6921c7f..78e1af7 100644
36353--- a/drivers/acpi/acpica/hwxfsleep.c
36354+++ b/drivers/acpi/acpica/hwxfsleep.c
36355@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36356 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36357
36358 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36359- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36360- acpi_hw_extended_sleep},
36361- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36362- acpi_hw_extended_wake_prep},
36363- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36364+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36365+ .extended_function = acpi_hw_extended_sleep},
36366+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36367+ .extended_function = acpi_hw_extended_wake_prep},
36368+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36369+ .extended_function = acpi_hw_extended_wake}
36370 };
36371
36372 /*
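
[Annotation] The hwxfsleep.c hunk converts the acpi_sleep_dispatch initializers from positional to designated form; the object code is identical, but each handler is now tied to its field by name, so reordering or extending struct acpi_sleep_functions cannot silently pair a function with the wrong slot. A minimal illustration with stand-in names:

    #include <stdio.h>

    struct sleep_fns {
        int (*legacy_function)(int);
        int (*extended_function)(int);
    };

    static int legacy(int s)   { return s + 1; }
    static int extended(int s) { return s + 2; }

    /* Designated form: still correct if struct sleep_fns is ever reordered. */
    static const struct sleep_fns dispatch[] = {
        { .legacy_function = legacy, .extended_function = extended },
    };

    int main(void)
    {
        printf("%d %d\n", dispatch[0].legacy_function(1),
                          dispatch[0].extended_function(1));
        return 0;
    }
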
36373diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36374index 16129c7..8b675cd 100644
36375--- a/drivers/acpi/apei/apei-internal.h
36376+++ b/drivers/acpi/apei/apei-internal.h
36377@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36378 struct apei_exec_ins_type {
36379 u32 flags;
36380 apei_exec_ins_func_t run;
36381-};
36382+} __do_const;
36383
36384 struct apei_exec_context {
36385 u32 ip;
36386diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36387index e82d097..0c855c1 100644
36388--- a/drivers/acpi/apei/ghes.c
36389+++ b/drivers/acpi/apei/ghes.c
36390@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36391 const struct acpi_hest_generic *generic,
36392 const struct acpi_hest_generic_status *estatus)
36393 {
36394- static atomic_t seqno;
36395+ static atomic_unchecked_t seqno;
36396 unsigned int curr_seqno;
36397 char pfx_seq[64];
36398
36399@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36400 else
36401 pfx = KERN_ERR;
36402 }
36403- curr_seqno = atomic_inc_return(&seqno);
36404+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36405 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36406 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36407 pfx_seq, generic->header.source_id);
36408diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36409index a83e3c6..c3d617f 100644
36410--- a/drivers/acpi/bgrt.c
36411+++ b/drivers/acpi/bgrt.c
36412@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36413 if (!bgrt_image)
36414 return -ENODEV;
36415
36416- bin_attr_image.private = bgrt_image;
36417- bin_attr_image.size = bgrt_image_size;
36418+ pax_open_kernel();
36419+ *(void **)&bin_attr_image.private = bgrt_image;
36420+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36421+ pax_close_kernel();
36422
36423 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36424 if (!bgrt_kobj)
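
[Annotation] Under this patch's constification, structures like bin_attr_image end up in read-only memory, so the late assignments above must open a short write window with pax_open_kernel()/pax_close_kernel() (PaX primitives, not upstream) and write through a *(void **)& cast to get past the const. A runnable userspace analog, with mprotect() playing the role of the PaX primitives:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);               /* data is now "const" */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()   */
        strcpy(p, "patched");                         /* the late assignment */
        mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel()  */

        puts(p);
        munmap(p, pagesz);
        return 0;
    }
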
36425diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36426index 9b693d5..8953d54 100644
36427--- a/drivers/acpi/blacklist.c
36428+++ b/drivers/acpi/blacklist.c
36429@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36430 u32 is_critical_error;
36431 };
36432
36433-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36434+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36435
36436 /*
36437 * POLICY: If *anything* doesn't work, put it on the blacklist.
36438@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36439 return 0;
36440 }
36441
36442-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36443+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36444 {
36445 .callback = dmi_disable_osi_vista,
36446 .ident = "Fujitsu Siemens",
36447diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36448index c68e724..e863008 100644
36449--- a/drivers/acpi/custom_method.c
36450+++ b/drivers/acpi/custom_method.c
36451@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36452 struct acpi_table_header table;
36453 acpi_status status;
36454
36455+#ifdef CONFIG_GRKERNSEC_KMEM
36456+ return -EPERM;
36457+#endif
36458+
36459 if (!(*ppos)) {
36460 /* parse the table header to get the table length */
36461 if (count <= sizeof(struct acpi_table_header))
36462diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36463index c0d44d3..5ad8f9a 100644
36464--- a/drivers/acpi/device_pm.c
36465+++ b/drivers/acpi/device_pm.c
36466@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36467
36468 #endif /* CONFIG_PM_SLEEP */
36469
36470+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36471+
36472 static struct dev_pm_domain acpi_general_pm_domain = {
36473 .ops = {
36474 #ifdef CONFIG_PM
36475@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36476 #endif
36477 #endif
36478 },
36479+ .detach = acpi_dev_pm_detach
36480 };
36481
36482 /**
36483@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36484 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36485 }
36486
36487- dev->pm_domain->detach = acpi_dev_pm_detach;
36488 return 0;
36489 }
36490 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36491diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36492index 87b704e..2d1d0c1 100644
36493--- a/drivers/acpi/processor_idle.c
36494+++ b/drivers/acpi/processor_idle.c
36495@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36496 {
36497 int i, count = CPUIDLE_DRIVER_STATE_START;
36498 struct acpi_processor_cx *cx;
36499- struct cpuidle_state *state;
36500+ cpuidle_state_no_const *state;
36501 struct cpuidle_driver *drv = &acpi_idle_driver;
36502
36503 if (!pr->flags.power_setup_done)
36504diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36505index 13e577c..cef11ee 100644
36506--- a/drivers/acpi/sysfs.c
36507+++ b/drivers/acpi/sysfs.c
36508@@ -423,11 +423,11 @@ static u32 num_counters;
36509 static struct attribute **all_attrs;
36510 static u32 acpi_gpe_count;
36511
36512-static struct attribute_group interrupt_stats_attr_group = {
36513+static attribute_group_no_const interrupt_stats_attr_group = {
36514 .name = "interrupts",
36515 };
36516
36517-static struct kobj_attribute *counter_attrs;
36518+static kobj_attribute_no_const *counter_attrs;
36519
36520 static void delete_gpe_attr_array(void)
36521 {
36522diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36523index 61a9c07..ea98fa1 100644
36524--- a/drivers/ata/libahci.c
36525+++ b/drivers/ata/libahci.c
36526@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36527 }
36528 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36529
36530-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36531+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36532 struct ata_taskfile *tf, int is_cmd, u16 flags,
36533 unsigned long timeout_msec)
36534 {
36535diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36536index d1a05f9..eb70e10 100644
36537--- a/drivers/ata/libata-core.c
36538+++ b/drivers/ata/libata-core.c
36539@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36540 static void ata_dev_xfermask(struct ata_device *dev);
36541 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36542
36543-atomic_t ata_print_id = ATOMIC_INIT(0);
36544+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36545
36546 struct ata_force_param {
36547 const char *name;
36548@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36549 struct ata_port *ap;
36550 unsigned int tag;
36551
36552- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36553+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36554 ap = qc->ap;
36555
36556 qc->flags = 0;
36557@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36558 struct ata_port *ap;
36559 struct ata_link *link;
36560
36561- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36562+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36563 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36564 ap = qc->ap;
36565 link = qc->dev->link;
36566@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36567 return;
36568
36569 spin_lock(&lock);
36570+ pax_open_kernel();
36571
36572 for (cur = ops->inherits; cur; cur = cur->inherits) {
36573 void **inherit = (void **)cur;
36574@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36575 if (IS_ERR(*pp))
36576 *pp = NULL;
36577
36578- ops->inherits = NULL;
36579+ *(struct ata_port_operations **)&ops->inherits = NULL;
36580
36581+ pax_close_kernel();
36582 spin_unlock(&lock);
36583 }
36584
36585@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36586
36587 /* give ports names and add SCSI hosts */
36588 for (i = 0; i < host->n_ports; i++) {
36589- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36590+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36591 host->ports[i]->local_port_no = i + 1;
36592 }
36593
36594diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36595index 6abd17a..9961bf7 100644
36596--- a/drivers/ata/libata-scsi.c
36597+++ b/drivers/ata/libata-scsi.c
36598@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36599
36600 if (rc)
36601 return rc;
36602- ap->print_id = atomic_inc_return(&ata_print_id);
36603+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36604 return 0;
36605 }
36606 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36607diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36608index 5f4e0cc..ff2c347 100644
36609--- a/drivers/ata/libata.h
36610+++ b/drivers/ata/libata.h
36611@@ -53,7 +53,7 @@ enum {
36612 ATA_DNXFER_QUIET = (1 << 31),
36613 };
36614
36615-extern atomic_t ata_print_id;
36616+extern atomic_unchecked_t ata_print_id;
36617 extern int atapi_passthru16;
36618 extern int libata_fua;
36619 extern int libata_noacpi;
36620diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36621index a9b0c82..207d97d 100644
36622--- a/drivers/ata/pata_arasan_cf.c
36623+++ b/drivers/ata/pata_arasan_cf.c
36624@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36625 /* Handle platform specific quirks */
36626 if (quirk) {
36627 if (quirk & CF_BROKEN_PIO) {
36628- ap->ops->set_piomode = NULL;
36629+ pax_open_kernel();
36630+ *(void **)&ap->ops->set_piomode = NULL;
36631+ pax_close_kernel();
36632 ap->pio_mask = 0;
36633 }
36634 if (quirk & CF_BROKEN_MWDMA)
36635diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36636index f9b983a..887b9d8 100644
36637--- a/drivers/atm/adummy.c
36638+++ b/drivers/atm/adummy.c
36639@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36640 vcc->pop(vcc, skb);
36641 else
36642 dev_kfree_skb_any(skb);
36643- atomic_inc(&vcc->stats->tx);
36644+ atomic_inc_unchecked(&vcc->stats->tx);
36645
36646 return 0;
36647 }
36648diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36649index f1a9198..f466a4a 100644
36650--- a/drivers/atm/ambassador.c
36651+++ b/drivers/atm/ambassador.c
36652@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36653 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36654
36655 // VC layer stats
36656- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36657+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36658
36659 // free the descriptor
36660 kfree (tx_descr);
36661@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36662 dump_skb ("<<<", vc, skb);
36663
36664 // VC layer stats
36665- atomic_inc(&atm_vcc->stats->rx);
36666+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36667 __net_timestamp(skb);
36668 // end of our responsibility
36669 atm_vcc->push (atm_vcc, skb);
36670@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36671 } else {
36672 PRINTK (KERN_INFO, "dropped over-size frame");
36673 // should we count this?
36674- atomic_inc(&atm_vcc->stats->rx_drop);
36675+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36676 }
36677
36678 } else {
36679@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36680 }
36681
36682 if (check_area (skb->data, skb->len)) {
36683- atomic_inc(&atm_vcc->stats->tx_err);
36684+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36685 return -ENOMEM; // ?
36686 }
36687
36688diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36689index 480fa6f..947067c 100644
36690--- a/drivers/atm/atmtcp.c
36691+++ b/drivers/atm/atmtcp.c
36692@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36693 if (vcc->pop) vcc->pop(vcc,skb);
36694 else dev_kfree_skb(skb);
36695 if (dev_data) return 0;
36696- atomic_inc(&vcc->stats->tx_err);
36697+ atomic_inc_unchecked(&vcc->stats->tx_err);
36698 return -ENOLINK;
36699 }
36700 size = skb->len+sizeof(struct atmtcp_hdr);
36701@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36702 if (!new_skb) {
36703 if (vcc->pop) vcc->pop(vcc,skb);
36704 else dev_kfree_skb(skb);
36705- atomic_inc(&vcc->stats->tx_err);
36706+ atomic_inc_unchecked(&vcc->stats->tx_err);
36707 return -ENOBUFS;
36708 }
36709 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36710@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36711 if (vcc->pop) vcc->pop(vcc,skb);
36712 else dev_kfree_skb(skb);
36713 out_vcc->push(out_vcc,new_skb);
36714- atomic_inc(&vcc->stats->tx);
36715- atomic_inc(&out_vcc->stats->rx);
36716+ atomic_inc_unchecked(&vcc->stats->tx);
36717+ atomic_inc_unchecked(&out_vcc->stats->rx);
36718 return 0;
36719 }
36720
36721@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36722 read_unlock(&vcc_sklist_lock);
36723 if (!out_vcc) {
36724 result = -EUNATCH;
36725- atomic_inc(&vcc->stats->tx_err);
36726+ atomic_inc_unchecked(&vcc->stats->tx_err);
36727 goto done;
36728 }
36729 skb_pull(skb,sizeof(struct atmtcp_hdr));
36730@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36731 __net_timestamp(new_skb);
36732 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36733 out_vcc->push(out_vcc,new_skb);
36734- atomic_inc(&vcc->stats->tx);
36735- atomic_inc(&out_vcc->stats->rx);
36736+ atomic_inc_unchecked(&vcc->stats->tx);
36737+ atomic_inc_unchecked(&out_vcc->stats->rx);
36738 done:
36739 if (vcc->pop) vcc->pop(vcc,skb);
36740 else dev_kfree_skb(skb);
36741diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36742index c7fab3e..68d0965 100644
36743--- a/drivers/atm/eni.c
36744+++ b/drivers/atm/eni.c
36745@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36746 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36747 vcc->dev->number);
36748 length = 0;
36749- atomic_inc(&vcc->stats->rx_err);
36750+ atomic_inc_unchecked(&vcc->stats->rx_err);
36751 }
36752 else {
36753 length = ATM_CELL_SIZE-1; /* no HEC */
36754@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36755 size);
36756 }
36757 eff = length = 0;
36758- atomic_inc(&vcc->stats->rx_err);
36759+ atomic_inc_unchecked(&vcc->stats->rx_err);
36760 }
36761 else {
36762 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36763@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36764 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36765 vcc->dev->number,vcc->vci,length,size << 2,descr);
36766 length = eff = 0;
36767- atomic_inc(&vcc->stats->rx_err);
36768+ atomic_inc_unchecked(&vcc->stats->rx_err);
36769 }
36770 }
36771 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36772@@ -770,7 +770,7 @@ rx_dequeued++;
36773 vcc->push(vcc,skb);
36774 pushed++;
36775 }
36776- atomic_inc(&vcc->stats->rx);
36777+ atomic_inc_unchecked(&vcc->stats->rx);
36778 }
36779 wake_up(&eni_dev->rx_wait);
36780 }
36781@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36782 PCI_DMA_TODEVICE);
36783 if (vcc->pop) vcc->pop(vcc,skb);
36784 else dev_kfree_skb_irq(skb);
36785- atomic_inc(&vcc->stats->tx);
36786+ atomic_inc_unchecked(&vcc->stats->tx);
36787 wake_up(&eni_dev->tx_wait);
36788 dma_complete++;
36789 }
36790diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36791index 82f2ae0..f205c02 100644
36792--- a/drivers/atm/firestream.c
36793+++ b/drivers/atm/firestream.c
36794@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36795 }
36796 }
36797
36798- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36799+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36800
36801 fs_dprintk (FS_DEBUG_TXMEM, "i");
36802 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36803@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36804 #endif
36805 skb_put (skb, qe->p1 & 0xffff);
36806 ATM_SKB(skb)->vcc = atm_vcc;
36807- atomic_inc(&atm_vcc->stats->rx);
36808+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36809 __net_timestamp(skb);
36810 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36811 atm_vcc->push (atm_vcc, skb);
36812@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36813 kfree (pe);
36814 }
36815 if (atm_vcc)
36816- atomic_inc(&atm_vcc->stats->rx_drop);
36817+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36818 break;
36819 case 0x1f: /* Reassembly abort: no buffers. */
36820 /* Silently increment error counter. */
36821 if (atm_vcc)
36822- atomic_inc(&atm_vcc->stats->rx_drop);
36823+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36824 break;
36825 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36826 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36827diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36828index d5d9eaf..65c0d53 100644
36829--- a/drivers/atm/fore200e.c
36830+++ b/drivers/atm/fore200e.c
36831@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36832 #endif
36833 /* check error condition */
36834 if (*entry->status & STATUS_ERROR)
36835- atomic_inc(&vcc->stats->tx_err);
36836+ atomic_inc_unchecked(&vcc->stats->tx_err);
36837 else
36838- atomic_inc(&vcc->stats->tx);
36839+ atomic_inc_unchecked(&vcc->stats->tx);
36840 }
36841 }
36842
36843@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36844 if (skb == NULL) {
36845 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36846
36847- atomic_inc(&vcc->stats->rx_drop);
36848+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36849 return -ENOMEM;
36850 }
36851
36852@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36853
36854 dev_kfree_skb_any(skb);
36855
36856- atomic_inc(&vcc->stats->rx_drop);
36857+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36858 return -ENOMEM;
36859 }
36860
36861 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36862
36863 vcc->push(vcc, skb);
36864- atomic_inc(&vcc->stats->rx);
36865+ atomic_inc_unchecked(&vcc->stats->rx);
36866
36867 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36868
36869@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36870 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36871 fore200e->atm_dev->number,
36872 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36873- atomic_inc(&vcc->stats->rx_err);
36874+ atomic_inc_unchecked(&vcc->stats->rx_err);
36875 }
36876 }
36877
36878@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36879 goto retry_here;
36880 }
36881
36882- atomic_inc(&vcc->stats->tx_err);
36883+ atomic_inc_unchecked(&vcc->stats->tx_err);
36884
36885 fore200e->tx_sat++;
36886 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36887diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36888index c39702b..785b73b 100644
36889--- a/drivers/atm/he.c
36890+++ b/drivers/atm/he.c
36891@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36892
36893 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36894 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36895- atomic_inc(&vcc->stats->rx_drop);
36896+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36897 goto return_host_buffers;
36898 }
36899
36900@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36901 RBRQ_LEN_ERR(he_dev->rbrq_head)
36902 ? "LEN_ERR" : "",
36903 vcc->vpi, vcc->vci);
36904- atomic_inc(&vcc->stats->rx_err);
36905+ atomic_inc_unchecked(&vcc->stats->rx_err);
36906 goto return_host_buffers;
36907 }
36908
36909@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36910 vcc->push(vcc, skb);
36911 spin_lock(&he_dev->global_lock);
36912
36913- atomic_inc(&vcc->stats->rx);
36914+ atomic_inc_unchecked(&vcc->stats->rx);
36915
36916 return_host_buffers:
36917 ++pdus_assembled;
36918@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36919 tpd->vcc->pop(tpd->vcc, tpd->skb);
36920 else
36921 dev_kfree_skb_any(tpd->skb);
36922- atomic_inc(&tpd->vcc->stats->tx_err);
36923+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36924 }
36925 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36926 return;
36927@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36928 vcc->pop(vcc, skb);
36929 else
36930 dev_kfree_skb_any(skb);
36931- atomic_inc(&vcc->stats->tx_err);
36932+ atomic_inc_unchecked(&vcc->stats->tx_err);
36933 return -EINVAL;
36934 }
36935
36936@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36937 vcc->pop(vcc, skb);
36938 else
36939 dev_kfree_skb_any(skb);
36940- atomic_inc(&vcc->stats->tx_err);
36941+ atomic_inc_unchecked(&vcc->stats->tx_err);
36942 return -EINVAL;
36943 }
36944 #endif
36945@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36946 vcc->pop(vcc, skb);
36947 else
36948 dev_kfree_skb_any(skb);
36949- atomic_inc(&vcc->stats->tx_err);
36950+ atomic_inc_unchecked(&vcc->stats->tx_err);
36951 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36952 return -ENOMEM;
36953 }
36954@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36955 vcc->pop(vcc, skb);
36956 else
36957 dev_kfree_skb_any(skb);
36958- atomic_inc(&vcc->stats->tx_err);
36959+ atomic_inc_unchecked(&vcc->stats->tx_err);
36960 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36961 return -ENOMEM;
36962 }
36963@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36964 __enqueue_tpd(he_dev, tpd, cid);
36965 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36966
36967- atomic_inc(&vcc->stats->tx);
36968+ atomic_inc_unchecked(&vcc->stats->tx);
36969
36970 return 0;
36971 }
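
[Annotation] The wholesale atomic_inc() to atomic_inc_unchecked() conversion running through these ATM drivers is the PAX_REFCOUNT split: with that feature, arithmetic on a plain atomic_t is overflow-checked (an overflowing increment traps and is undone) to catch reference-count bugs, while atomic_unchecked_t keeps ordinary wrapping semantics for values that are mere statistics, like the VCC tx/rx counters here. A sketch of the split, using illustrative standalone redefinitions rather than the kernel's headers:

    /* Illustrative redefinitions; the real types live in the patched atomic
     * headers. With PAX_REFCOUNT, ops on atomic_t trap on overflow;
     * atomic_unchecked_t keeps the plain wrapping ops. */
    typedef struct { int counter; } atomic_t;            /* overflow-checked */
    typedef struct { int counter; } atomic_unchecked_t;  /* wraps silently   */

    struct vcc_stats_sketch {
        atomic_unchecked_t tx, tx_err;          /* statistics: free to wrap */
        atomic_unchecked_t rx, rx_err, rx_drop;
    };

    struct some_object_sketch {
        atomic_t refcount;   /* a refcount overflow is a bug, so it traps */
    };
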
36972diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36973index 1dc0519..1aadaf7 100644
36974--- a/drivers/atm/horizon.c
36975+++ b/drivers/atm/horizon.c
36976@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36977 {
36978 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36979 // VC layer stats
36980- atomic_inc(&vcc->stats->rx);
36981+ atomic_inc_unchecked(&vcc->stats->rx);
36982 __net_timestamp(skb);
36983 // end of our responsibility
36984 vcc->push (vcc, skb);
36985@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36986 dev->tx_iovec = NULL;
36987
36988 // VC layer stats
36989- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36990+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36991
36992 // free the skb
36993 hrz_kfree_skb (skb);
36994diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36995index 2b24ed0..b3d6acc 100644
36996--- a/drivers/atm/idt77252.c
36997+++ b/drivers/atm/idt77252.c
36998@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36999 else
37000 dev_kfree_skb(skb);
37001
37002- atomic_inc(&vcc->stats->tx);
37003+ atomic_inc_unchecked(&vcc->stats->tx);
37004 }
37005
37006 atomic_dec(&scq->used);
37007@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37008 if ((sb = dev_alloc_skb(64)) == NULL) {
37009 printk("%s: Can't allocate buffers for aal0.\n",
37010 card->name);
37011- atomic_add(i, &vcc->stats->rx_drop);
37012+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37013 break;
37014 }
37015 if (!atm_charge(vcc, sb->truesize)) {
37016 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37017 card->name);
37018- atomic_add(i - 1, &vcc->stats->rx_drop);
37019+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37020 dev_kfree_skb(sb);
37021 break;
37022 }
37023@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37024 ATM_SKB(sb)->vcc = vcc;
37025 __net_timestamp(sb);
37026 vcc->push(vcc, sb);
37027- atomic_inc(&vcc->stats->rx);
37028+ atomic_inc_unchecked(&vcc->stats->rx);
37029
37030 cell += ATM_CELL_PAYLOAD;
37031 }
37032@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37033 "(CDC: %08x)\n",
37034 card->name, len, rpp->len, readl(SAR_REG_CDC));
37035 recycle_rx_pool_skb(card, rpp);
37036- atomic_inc(&vcc->stats->rx_err);
37037+ atomic_inc_unchecked(&vcc->stats->rx_err);
37038 return;
37039 }
37040 if (stat & SAR_RSQE_CRC) {
37041 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37042 recycle_rx_pool_skb(card, rpp);
37043- atomic_inc(&vcc->stats->rx_err);
37044+ atomic_inc_unchecked(&vcc->stats->rx_err);
37045 return;
37046 }
37047 if (skb_queue_len(&rpp->queue) > 1) {
37048@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37049 RXPRINTK("%s: Can't alloc RX skb.\n",
37050 card->name);
37051 recycle_rx_pool_skb(card, rpp);
37052- atomic_inc(&vcc->stats->rx_err);
37053+ atomic_inc_unchecked(&vcc->stats->rx_err);
37054 return;
37055 }
37056 if (!atm_charge(vcc, skb->truesize)) {
37057@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37058 __net_timestamp(skb);
37059
37060 vcc->push(vcc, skb);
37061- atomic_inc(&vcc->stats->rx);
37062+ atomic_inc_unchecked(&vcc->stats->rx);
37063
37064 return;
37065 }
37066@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37067 __net_timestamp(skb);
37068
37069 vcc->push(vcc, skb);
37070- atomic_inc(&vcc->stats->rx);
37071+ atomic_inc_unchecked(&vcc->stats->rx);
37072
37073 if (skb->truesize > SAR_FB_SIZE_3)
37074 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37075@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37076 if (vcc->qos.aal != ATM_AAL0) {
37077 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37078 card->name, vpi, vci);
37079- atomic_inc(&vcc->stats->rx_drop);
37080+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37081 goto drop;
37082 }
37083
37084 if ((sb = dev_alloc_skb(64)) == NULL) {
37085 printk("%s: Can't allocate buffers for AAL0.\n",
37086 card->name);
37087- atomic_inc(&vcc->stats->rx_err);
37088+ atomic_inc_unchecked(&vcc->stats->rx_err);
37089 goto drop;
37090 }
37091
37092@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37093 ATM_SKB(sb)->vcc = vcc;
37094 __net_timestamp(sb);
37095 vcc->push(vcc, sb);
37096- atomic_inc(&vcc->stats->rx);
37097+ atomic_inc_unchecked(&vcc->stats->rx);
37098
37099 drop:
37100 skb_pull(queue, 64);
37101@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37102
37103 if (vc == NULL) {
37104 printk("%s: NULL connection in send().\n", card->name);
37105- atomic_inc(&vcc->stats->tx_err);
37106+ atomic_inc_unchecked(&vcc->stats->tx_err);
37107 dev_kfree_skb(skb);
37108 return -EINVAL;
37109 }
37110 if (!test_bit(VCF_TX, &vc->flags)) {
37111 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37112- atomic_inc(&vcc->stats->tx_err);
37113+ atomic_inc_unchecked(&vcc->stats->tx_err);
37114 dev_kfree_skb(skb);
37115 return -EINVAL;
37116 }
37117@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37118 break;
37119 default:
37120 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37121- atomic_inc(&vcc->stats->tx_err);
37122+ atomic_inc_unchecked(&vcc->stats->tx_err);
37123 dev_kfree_skb(skb);
37124 return -EINVAL;
37125 }
37126
37127 if (skb_shinfo(skb)->nr_frags != 0) {
37128 printk("%s: No scatter-gather yet.\n", card->name);
37129- atomic_inc(&vcc->stats->tx_err);
37130+ atomic_inc_unchecked(&vcc->stats->tx_err);
37131 dev_kfree_skb(skb);
37132 return -EINVAL;
37133 }
37134@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37135
37136 err = queue_skb(card, vc, skb, oam);
37137 if (err) {
37138- atomic_inc(&vcc->stats->tx_err);
37139+ atomic_inc_unchecked(&vcc->stats->tx_err);
37140 dev_kfree_skb(skb);
37141 return err;
37142 }
37143@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37144 skb = dev_alloc_skb(64);
37145 if (!skb) {
37146 printk("%s: Out of memory in send_oam().\n", card->name);
37147- atomic_inc(&vcc->stats->tx_err);
37148+ atomic_inc_unchecked(&vcc->stats->tx_err);
37149 return -ENOMEM;
37150 }
37151 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37152diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37153index 4217f29..88f547a 100644
37154--- a/drivers/atm/iphase.c
37155+++ b/drivers/atm/iphase.c
37156@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37157 status = (u_short) (buf_desc_ptr->desc_mode);
37158 if (status & (RX_CER | RX_PTE | RX_OFL))
37159 {
37160- atomic_inc(&vcc->stats->rx_err);
37161+ atomic_inc_unchecked(&vcc->stats->rx_err);
37162 IF_ERR(printk("IA: bad packet, dropping it");)
37163 if (status & RX_CER) {
37164 IF_ERR(printk(" cause: packet CRC error\n");)
37165@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37166 len = dma_addr - buf_addr;
37167 if (len > iadev->rx_buf_sz) {
37168 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37169- atomic_inc(&vcc->stats->rx_err);
37170+ atomic_inc_unchecked(&vcc->stats->rx_err);
37171 goto out_free_desc;
37172 }
37173
37174@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37175 ia_vcc = INPH_IA_VCC(vcc);
37176 if (ia_vcc == NULL)
37177 {
37178- atomic_inc(&vcc->stats->rx_err);
37179+ atomic_inc_unchecked(&vcc->stats->rx_err);
37180 atm_return(vcc, skb->truesize);
37181 dev_kfree_skb_any(skb);
37182 goto INCR_DLE;
37183@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37184 if ((length > iadev->rx_buf_sz) || (length >
37185 (skb->len - sizeof(struct cpcs_trailer))))
37186 {
37187- atomic_inc(&vcc->stats->rx_err);
37188+ atomic_inc_unchecked(&vcc->stats->rx_err);
37189 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37190 length, skb->len);)
37191 atm_return(vcc, skb->truesize);
37192@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37193
37194 IF_RX(printk("rx_dle_intr: skb push");)
37195 vcc->push(vcc,skb);
37196- atomic_inc(&vcc->stats->rx);
37197+ atomic_inc_unchecked(&vcc->stats->rx);
37198 iadev->rx_pkt_cnt++;
37199 }
37200 INCR_DLE:
37201@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37202 {
37203 struct k_sonet_stats *stats;
37204 stats = &PRIV(_ia_dev[board])->sonet_stats;
37205- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37206- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37207- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37208- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37209- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37210- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37211- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37212- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37213- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37214+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37215+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37216+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37217+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37218+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37219+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37220+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37221+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37222+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37223 }
37224 ia_cmds.status = 0;
37225 break;
37226@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37227 if ((desc == 0) || (desc > iadev->num_tx_desc))
37228 {
37229 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37230- atomic_inc(&vcc->stats->tx);
37231+ atomic_inc_unchecked(&vcc->stats->tx);
37232 if (vcc->pop)
37233 vcc->pop(vcc, skb);
37234 else
37235@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37236 ATM_DESC(skb) = vcc->vci;
37237 skb_queue_tail(&iadev->tx_dma_q, skb);
37238
37239- atomic_inc(&vcc->stats->tx);
37240+ atomic_inc_unchecked(&vcc->stats->tx);
37241 iadev->tx_pkt_cnt++;
37242 /* Increment transaction counter */
37243 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37244
37245 #if 0
37246 /* add flow control logic */
37247- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37248+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37249 if (iavcc->vc_desc_cnt > 10) {
37250 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37251 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37252diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37253index 93eaf8d..b4ca7da 100644
37254--- a/drivers/atm/lanai.c
37255+++ b/drivers/atm/lanai.c
37256@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37257 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37258 lanai_endtx(lanai, lvcc);
37259 lanai_free_skb(lvcc->tx.atmvcc, skb);
37260- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37261+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37262 }
37263
37264 /* Try to fill the buffer - don't call unless there is backlog */
37265@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37266 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37267 __net_timestamp(skb);
37268 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37269- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37270+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37271 out:
37272 lvcc->rx.buf.ptr = end;
37273 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37274@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37275 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37276 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37277 lanai->stats.service_rxnotaal5++;
37278- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37279+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37280 return 0;
37281 }
37282 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37283@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37284 int bytes;
37285 read_unlock(&vcc_sklist_lock);
37286 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37287- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37288+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37289 lvcc->stats.x.aal5.service_trash++;
37290 bytes = (SERVICE_GET_END(s) * 16) -
37291 (((unsigned long) lvcc->rx.buf.ptr) -
37292@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37293 }
37294 if (s & SERVICE_STREAM) {
37295 read_unlock(&vcc_sklist_lock);
37296- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37297+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37298 lvcc->stats.x.aal5.service_stream++;
37299 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37300 "PDU on VCI %d!\n", lanai->number, vci);
37301@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37302 return 0;
37303 }
37304 DPRINTK("got rx crc error on vci %d\n", vci);
37305- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37306+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37307 lvcc->stats.x.aal5.service_rxcrc++;
37308 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37309 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37310diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37311index 9988ac9..7c52585 100644
37312--- a/drivers/atm/nicstar.c
37313+++ b/drivers/atm/nicstar.c
37314@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37315 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37316 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37317 card->index);
37318- atomic_inc(&vcc->stats->tx_err);
37319+ atomic_inc_unchecked(&vcc->stats->tx_err);
37320 dev_kfree_skb_any(skb);
37321 return -EINVAL;
37322 }
37323@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37324 if (!vc->tx) {
37325 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37326 card->index);
37327- atomic_inc(&vcc->stats->tx_err);
37328+ atomic_inc_unchecked(&vcc->stats->tx_err);
37329 dev_kfree_skb_any(skb);
37330 return -EINVAL;
37331 }
37332@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37333 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37334 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37335 card->index);
37336- atomic_inc(&vcc->stats->tx_err);
37337+ atomic_inc_unchecked(&vcc->stats->tx_err);
37338 dev_kfree_skb_any(skb);
37339 return -EINVAL;
37340 }
37341
37342 if (skb_shinfo(skb)->nr_frags != 0) {
37343 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37344- atomic_inc(&vcc->stats->tx_err);
37345+ atomic_inc_unchecked(&vcc->stats->tx_err);
37346 dev_kfree_skb_any(skb);
37347 return -EINVAL;
37348 }
37349@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37350 }
37351
37352 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37353- atomic_inc(&vcc->stats->tx_err);
37354+ atomic_inc_unchecked(&vcc->stats->tx_err);
37355 dev_kfree_skb_any(skb);
37356 return -EIO;
37357 }
37358- atomic_inc(&vcc->stats->tx);
37359+ atomic_inc_unchecked(&vcc->stats->tx);
37360
37361 return 0;
37362 }
37363@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37364 printk
37365 ("nicstar%d: Can't allocate buffers for aal0.\n",
37366 card->index);
37367- atomic_add(i, &vcc->stats->rx_drop);
37368+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37369 break;
37370 }
37371 if (!atm_charge(vcc, sb->truesize)) {
37372 RXPRINTK
37373 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37374 card->index);
37375- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37376+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37377 dev_kfree_skb_any(sb);
37378 break;
37379 }
37380@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37381 ATM_SKB(sb)->vcc = vcc;
37382 __net_timestamp(sb);
37383 vcc->push(vcc, sb);
37384- atomic_inc(&vcc->stats->rx);
37385+ atomic_inc_unchecked(&vcc->stats->rx);
37386 cell += ATM_CELL_PAYLOAD;
37387 }
37388
37389@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37390 if (iovb == NULL) {
37391 printk("nicstar%d: Out of iovec buffers.\n",
37392 card->index);
37393- atomic_inc(&vcc->stats->rx_drop);
37394+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37395 recycle_rx_buf(card, skb);
37396 return;
37397 }
37398@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37399 small or large buffer itself. */
37400 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37401 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37402- atomic_inc(&vcc->stats->rx_err);
37403+ atomic_inc_unchecked(&vcc->stats->rx_err);
37404 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37405 NS_MAX_IOVECS);
37406 NS_PRV_IOVCNT(iovb) = 0;
37407@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37408 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37409 card->index);
37410 which_list(card, skb);
37411- atomic_inc(&vcc->stats->rx_err);
37412+ atomic_inc_unchecked(&vcc->stats->rx_err);
37413 recycle_rx_buf(card, skb);
37414 vc->rx_iov = NULL;
37415 recycle_iov_buf(card, iovb);
37416@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37417 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37418 card->index);
37419 which_list(card, skb);
37420- atomic_inc(&vcc->stats->rx_err);
37421+ atomic_inc_unchecked(&vcc->stats->rx_err);
37422 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37423 NS_PRV_IOVCNT(iovb));
37424 vc->rx_iov = NULL;
37425@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37426 printk(" - PDU size mismatch.\n");
37427 else
37428 printk(".\n");
37429- atomic_inc(&vcc->stats->rx_err);
37430+ atomic_inc_unchecked(&vcc->stats->rx_err);
37431 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37432 NS_PRV_IOVCNT(iovb));
37433 vc->rx_iov = NULL;
37434@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37435 /* skb points to a small buffer */
37436 if (!atm_charge(vcc, skb->truesize)) {
37437 push_rxbufs(card, skb);
37438- atomic_inc(&vcc->stats->rx_drop);
37439+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37440 } else {
37441 skb_put(skb, len);
37442 dequeue_sm_buf(card, skb);
37443@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37444 ATM_SKB(skb)->vcc = vcc;
37445 __net_timestamp(skb);
37446 vcc->push(vcc, skb);
37447- atomic_inc(&vcc->stats->rx);
37448+ atomic_inc_unchecked(&vcc->stats->rx);
37449 }
37450 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37451 struct sk_buff *sb;
37452@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37453 if (len <= NS_SMBUFSIZE) {
37454 if (!atm_charge(vcc, sb->truesize)) {
37455 push_rxbufs(card, sb);
37456- atomic_inc(&vcc->stats->rx_drop);
37457+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37458 } else {
37459 skb_put(sb, len);
37460 dequeue_sm_buf(card, sb);
37461@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37462 ATM_SKB(sb)->vcc = vcc;
37463 __net_timestamp(sb);
37464 vcc->push(vcc, sb);
37465- atomic_inc(&vcc->stats->rx);
37466+ atomic_inc_unchecked(&vcc->stats->rx);
37467 }
37468
37469 push_rxbufs(card, skb);
37470@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37471
37472 if (!atm_charge(vcc, skb->truesize)) {
37473 push_rxbufs(card, skb);
37474- atomic_inc(&vcc->stats->rx_drop);
37475+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37476 } else {
37477 dequeue_lg_buf(card, skb);
37478 #ifdef NS_USE_DESTRUCTORS
37479@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37480 ATM_SKB(skb)->vcc = vcc;
37481 __net_timestamp(skb);
37482 vcc->push(vcc, skb);
37483- atomic_inc(&vcc->stats->rx);
37484+ atomic_inc_unchecked(&vcc->stats->rx);
37485 }
37486
37487 push_rxbufs(card, sb);
37488@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37489 printk
37490 ("nicstar%d: Out of huge buffers.\n",
37491 card->index);
37492- atomic_inc(&vcc->stats->rx_drop);
37493+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37494 recycle_iovec_rx_bufs(card,
37495 (struct iovec *)
37496 iovb->data,
37497@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37498 card->hbpool.count++;
37499 } else
37500 dev_kfree_skb_any(hb);
37501- atomic_inc(&vcc->stats->rx_drop);
37502+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37503 } else {
37504 /* Copy the small buffer to the huge buffer */
37505 sb = (struct sk_buff *)iov->iov_base;
37506@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37507 #endif /* NS_USE_DESTRUCTORS */
37508 __net_timestamp(hb);
37509 vcc->push(vcc, hb);
37510- atomic_inc(&vcc->stats->rx);
37511+ atomic_inc_unchecked(&vcc->stats->rx);
37512 }
37513 }
37514
37515diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37516index 21b0bc6..b5f40ba 100644
37517--- a/drivers/atm/solos-pci.c
37518+++ b/drivers/atm/solos-pci.c
37519@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37520 }
37521 atm_charge(vcc, skb->truesize);
37522 vcc->push(vcc, skb);
37523- atomic_inc(&vcc->stats->rx);
37524+ atomic_inc_unchecked(&vcc->stats->rx);
37525 break;
37526
37527 case PKT_STATUS:
37528@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37529 vcc = SKB_CB(oldskb)->vcc;
37530
37531 if (vcc) {
37532- atomic_inc(&vcc->stats->tx);
37533+ atomic_inc_unchecked(&vcc->stats->tx);
37534 solos_pop(vcc, oldskb);
37535 } else {
37536 dev_kfree_skb_irq(oldskb);
37537diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37538index 0215934..ce9f5b1 100644
37539--- a/drivers/atm/suni.c
37540+++ b/drivers/atm/suni.c
37541@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37542
37543
37544 #define ADD_LIMITED(s,v) \
37545- atomic_add((v),&stats->s); \
37546- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37547+ atomic_add_unchecked((v),&stats->s); \
37548+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37549
37550
37551 static void suni_hz(unsigned long from_timer)
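
The suni.c ADD_LIMITED macro saturates a SONET error counter at INT_MAX instead of letting the signed value go negative; the conversion keeps that saturation logic but routes it through the unchecked ops, since deliberate clamping is exactly the kind of "overflow" the REFCOUNT instrumentation would otherwise flag. Using the helpers sketched after the he.c hunks, the macro expands roughly as follows for one field (illustrative expansion, not patch text):

    #include <limits.h>

    /* ADD_LIMITED(section_bip, v) after the conversion, approximately: */
    static void add_limited_example(struct k_sonet_stats *stats, int v)
    {
        atomic_add_unchecked(v, &stats->section_bip);
        if (atomic_read_unchecked(&stats->section_bip) < 0)
            atomic_set_unchecked(&stats->section_bip, INT_MAX);
    }

The uPD98402.c hunks below apply the identical treatment to that PHY driver's own ADD_LIMITED macro.
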
37552diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37553index 5120a96..e2572bd 100644
37554--- a/drivers/atm/uPD98402.c
37555+++ b/drivers/atm/uPD98402.c
37556@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37557 struct sonet_stats tmp;
37558 int error = 0;
37559
37560- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37561+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37562 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37563 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37564 if (zero && !error) {
37565@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37566
37567
37568 #define ADD_LIMITED(s,v) \
37569- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37570- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37571- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37572+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37573+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37574+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37575
37576
37577 static void stat_event(struct atm_dev *dev)
37578@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37579 if (reason & uPD98402_INT_PFM) stat_event(dev);
37580 if (reason & uPD98402_INT_PCO) {
37581 (void) GET(PCOCR); /* clear interrupt cause */
37582- atomic_add(GET(HECCT),
37583+ atomic_add_unchecked(GET(HECCT),
37584 &PRIV(dev)->sonet_stats.uncorr_hcs);
37585 }
37586 if ((reason & uPD98402_INT_RFO) &&
37587@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37588 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37589 uPD98402_INT_LOS),PIMR); /* enable them */
37590 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37591- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37592- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37593- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37594+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37595+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37596+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37597 return 0;
37598 }
37599
37600diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37601index 969c3c2..9b72956 100644
37602--- a/drivers/atm/zatm.c
37603+++ b/drivers/atm/zatm.c
37604@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37605 }
37606 if (!size) {
37607 dev_kfree_skb_irq(skb);
37608- if (vcc) atomic_inc(&vcc->stats->rx_err);
37609+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37610 continue;
37611 }
37612 if (!atm_charge(vcc,skb->truesize)) {
37613@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37614 skb->len = size;
37615 ATM_SKB(skb)->vcc = vcc;
37616 vcc->push(vcc,skb);
37617- atomic_inc(&vcc->stats->rx);
37618+ atomic_inc_unchecked(&vcc->stats->rx);
37619 }
37620 zout(pos & 0xffff,MTA(mbx));
37621 #if 0 /* probably a stupid idea */
37622@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37623 skb_queue_head(&zatm_vcc->backlog,skb);
37624 break;
37625 }
37626- atomic_inc(&vcc->stats->tx);
37627+ atomic_inc_unchecked(&vcc->stats->tx);
37628 wake_up(&zatm_vcc->tx_wait);
37629 }
37630
37631diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37632index 876bae5..8978785 100644
37633--- a/drivers/base/bus.c
37634+++ b/drivers/base/bus.c
37635@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37636 return -EINVAL;
37637
37638 mutex_lock(&subsys->p->mutex);
37639- list_add_tail(&sif->node, &subsys->p->interfaces);
37640+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37641 if (sif->add_dev) {
37642 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37643 while ((dev = subsys_dev_iter_next(&iter)))
37644@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37645 subsys = sif->subsys;
37646
37647 mutex_lock(&subsys->p->mutex);
37648- list_del_init(&sif->node);
37649+ pax_list_del_init((struct list_head *)&sif->node);
37650 if (sif->remove_dev) {
37651 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37652 while ((dev = subsys_dev_iter_next(&iter)))
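
drivers/base/bus.c switches the subsys interface list from list_add_tail/list_del_init to pax_list_add_tail/pax_list_del_init, with the node cast away from const. The reason is the constify plugin: struct subsys_interface instances end up in read-only memory, so their embedded list_head can only be linked by briefly reopening the kernel for writing. A plausible shape of the helper (sketch only; the real implementation lives elsewhere in this patch and also keeps the list-debug sanity checks):

    #include <linux/list.h>

    /* Ordinary list_add_tail bracketed by pax_open_kernel()/pax_close_kernel()
     * so list nodes embedded in otherwise read-only objects can be linked. */
    void pax_list_add_tail(struct list_head *new, struct list_head *head)
    {
        struct list_head *prev = head->prev;

        pax_open_kernel();
        new->next = head;
        new->prev = prev;
        prev->next = new;
        head->prev = new;
        pax_close_kernel();
    }

The same pax_list_* substitution reappears in drivers/base/syscore.c further down, for the same reason.
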
37653diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37654index 25798db..15f130e 100644
37655--- a/drivers/base/devtmpfs.c
37656+++ b/drivers/base/devtmpfs.c
37657@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37658 if (!thread)
37659 return 0;
37660
37661- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37662+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37663 if (err)
37664 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37665 else
37666@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37667 *err = sys_unshare(CLONE_NEWNS);
37668 if (*err)
37669 goto out;
37670- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37671+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37672 if (*err)
37673 goto out;
37674- sys_chdir("/.."); /* will traverse into overmounted root */
37675- sys_chroot(".");
37676+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37677+ sys_chroot((char __force_user *)".");
37678 complete(&setup_done);
37679 while (1) {
37680 spin_lock(&req_lock);
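
The devtmpfs hunks change no behavior at all: sys_mount(), sys_chdir() and sys_chroot() are declared to take __user pointers, and under grsecurity's stricter sparse annotations passing a kernel string through that interface must be spelled out. __force_user is the explicit "yes, this crossing is intentional" cast; it affects only static checking, not generated code. Roughly how this patch family defines it (sketch):

    #ifdef __CHECKER__
    # define __force_user   __force __user
    #else
    # define __force_user
    #endif

The loop.c hunk later in this section uses the same annotation for the same reason, a kernel buffer handed to a __user-typed file write op.
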
37681diff --git a/drivers/base/node.c b/drivers/base/node.c
37682index a3b82e9..f90a8ce 100644
37683--- a/drivers/base/node.c
37684+++ b/drivers/base/node.c
37685@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37686 struct node_attr {
37687 struct device_attribute attr;
37688 enum node_states state;
37689-};
37690+} __do_const;
37691
37692 static ssize_t show_node_state(struct device *dev,
37693 struct device_attribute *attr, char *buf)
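
struct node_attr gains __do_const, the marker understood by the constify GCC plugin: every instance of the marked type is treated as const and placed in read-only memory, so a kernel write primitive cannot redirect the embedded device_attribute callbacks. The same marker shows up on the drbd command tables further down. Approximately how the marker is defined (sketch; the exact spelling depends on the plugin headers in this patch):

    #ifdef CONSTIFY_PLUGIN
    # define __do_const     __attribute__((do_const))
    #else
    # define __do_const
    #endif
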
37694diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37695index 0d8780c..0b5df3f 100644
37696--- a/drivers/base/power/domain.c
37697+++ b/drivers/base/power/domain.c
37698@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37699 {
37700 struct cpuidle_driver *cpuidle_drv;
37701 struct gpd_cpuidle_data *cpuidle_data;
37702- struct cpuidle_state *idle_state;
37703+ cpuidle_state_no_const *idle_state;
37704 int ret = 0;
37705
37706 if (IS_ERR_OR_NULL(genpd) || state < 0)
37707@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37708 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37709 {
37710 struct gpd_cpuidle_data *cpuidle_data;
37711- struct cpuidle_state *idle_state;
37712+ cpuidle_state_no_const *idle_state;
37713 int ret = 0;
37714
37715 if (IS_ERR_OR_NULL(genpd))
37716@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37717 return ret;
37718 }
37719
37720- dev->pm_domain->detach = genpd_dev_pm_detach;
37721+ pax_open_kernel();
37722+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37723+ pax_close_kernel();
37724+
37725 pm_genpd_poweron(pd);
37726
37727 return 0;
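
genpd_dev_pm_attach() now assigns dev->pm_domain->detach through a pax_open_kernel()/pax_close_kernel() pair and a (void **) cast, because struct dev_pm_domain is constified and this late assignment is a deliberate write into read-only data. Conceptually, on x86 the pair toggles the CR0.WP bit around the write; the in-tree implementation is arch-specific and handles more corner cases, so treat the following as an illustration only:

    /* Kernel context: read_cr0/write_cr0 and X86_CR0_WP from <asm/...>. */
    static inline void pax_open_kernel_sketch(void)
    {
        preempt_disable();
        barrier();
        write_cr0(read_cr0() & ~X86_CR0_WP);    /* allow writes to read-only pages */
    }

    static inline void pax_close_kernel_sketch(void)
    {
        write_cr0(read_cr0() | X86_CR0_WP);     /* restore write protection */
        barrier();
        preempt_enable();
    }
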
37728diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37729index d2be3f9..0a3167a 100644
37730--- a/drivers/base/power/sysfs.c
37731+++ b/drivers/base/power/sysfs.c
37732@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37733 return -EIO;
37734 }
37735 }
37736- return sprintf(buf, p);
37737+ return sprintf(buf, "%s", p);
37738 }
37739
37740 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
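
Unlike the hardening churn around it, the sysfs.c hunk is a plain bug fix: sprintf(buf, p) uses a variable as the format string, so any '%' in a status string would be interpreted as a conversion rather than copied, a classic format-string primitive. sprintf(buf, "%s", p) copies it verbatim. The rtpm status strings are constant today, so this is defensive, but the failure mode is easy to demonstrate (hypothetical user-space example):

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        const char *p = "status %s";    /* imagine future- or attacker-influenced text */

        /* sprintf(buf, p); would read a nonexistent va_arg here */
        sprintf(buf, "%s", p);          /* safe: p is copied literally */
        printf("%s\n", buf);
        return 0;
    }
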
37741diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37742index c2744b3..08fac19 100644
37743--- a/drivers/base/power/wakeup.c
37744+++ b/drivers/base/power/wakeup.c
37745@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37746 * They need to be modified together atomically, so it's better to use one
37747 * atomic variable to hold them both.
37748 */
37749-static atomic_t combined_event_count = ATOMIC_INIT(0);
37750+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37751
37752 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37753 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37754
37755 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37756 {
37757- unsigned int comb = atomic_read(&combined_event_count);
37758+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37759
37760 *cnt = (comb >> IN_PROGRESS_BITS);
37761 *inpr = comb & MAX_IN_PROGRESS;
37762@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37763 ws->start_prevent_time = ws->last_time;
37764
37765 /* Increment the counter of events in progress. */
37766- cec = atomic_inc_return(&combined_event_count);
37767+ cec = atomic_inc_return_unchecked(&combined_event_count);
37768
37769 trace_wakeup_source_activate(ws->name, cec);
37770 }
37771@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37772 * Increment the counter of registered wakeup events and decrement the
37773 * couter of wakeup events in progress simultaneously.
37774 */
37775- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37776+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37777 trace_wakeup_source_deactivate(ws->name, cec);
37778
37779 split_counters(&cnt, &inpr);
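
wakeup.c packs two counters into one atomic word, the low IN_PROGRESS_BITS bits counting events in progress and the high bits counting registered events, so both can be updated in a single atomic operation; since both halves are meant to wrap benignly, the word becomes atomic_unchecked_t. The deactivate path above relies on a carry trick: adding MAX_IN_PROGRESS, i.e. (1 << IN_PROGRESS_BITS) - 1, decrements the low field by one and carries plus one into the high field in the same add. A self-checking illustration:

    #include <assert.h>

    #define IN_PROGRESS_BITS (sizeof(int) * 4)
    #define MAX_IN_PROGRESS  ((1 << IN_PROGRESS_BITS) - 1)

    int main(void)
    {
        /* 3 registered events, 2 in progress */
        unsigned int comb = (3u << IN_PROGRESS_BITS) | 2u;

        comb += MAX_IN_PROGRESS;        /* registered++, in-progress-- in one add */

        assert((comb >> IN_PROGRESS_BITS) == 4);
        assert((comb & MAX_IN_PROGRESS) == 1);
        return 0;
    }
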
37780diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37781index 8d98a32..61d3165 100644
37782--- a/drivers/base/syscore.c
37783+++ b/drivers/base/syscore.c
37784@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37785 void register_syscore_ops(struct syscore_ops *ops)
37786 {
37787 mutex_lock(&syscore_ops_lock);
37788- list_add_tail(&ops->node, &syscore_ops_list);
37789+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37790 mutex_unlock(&syscore_ops_lock);
37791 }
37792 EXPORT_SYMBOL_GPL(register_syscore_ops);
37793@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37794 void unregister_syscore_ops(struct syscore_ops *ops)
37795 {
37796 mutex_lock(&syscore_ops_lock);
37797- list_del(&ops->node);
37798+ pax_list_del((struct list_head *)&ops->node);
37799 mutex_unlock(&syscore_ops_lock);
37800 }
37801 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37802diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37803index ff20f19..018f1da 100644
37804--- a/drivers/block/cciss.c
37805+++ b/drivers/block/cciss.c
37806@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37807 while (!list_empty(&h->reqQ)) {
37808 c = list_entry(h->reqQ.next, CommandList_struct, list);
37809 /* can't do anything if fifo is full */
37810- if ((h->access.fifo_full(h))) {
37811+ if ((h->access->fifo_full(h))) {
37812 dev_warn(&h->pdev->dev, "fifo full\n");
37813 break;
37814 }
37815@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37816 h->Qdepth--;
37817
37818 /* Tell the controller execute command */
37819- h->access.submit_command(h, c);
37820+ h->access->submit_command(h, c);
37821
37822 /* Put job onto the completed Q */
37823 addQ(&h->cmpQ, c);
37824@@ -3444,17 +3444,17 @@ startio:
37825
37826 static inline unsigned long get_next_completion(ctlr_info_t *h)
37827 {
37828- return h->access.command_completed(h);
37829+ return h->access->command_completed(h);
37830 }
37831
37832 static inline int interrupt_pending(ctlr_info_t *h)
37833 {
37834- return h->access.intr_pending(h);
37835+ return h->access->intr_pending(h);
37836 }
37837
37838 static inline long interrupt_not_for_us(ctlr_info_t *h)
37839 {
37840- return ((h->access.intr_pending(h) == 0) ||
37841+ return ((h->access->intr_pending(h) == 0) ||
37842 (h->interrupts_enabled == 0));
37843 }
37844
37845@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37846 u32 a;
37847
37848 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37849- return h->access.command_completed(h);
37850+ return h->access->command_completed(h);
37851
37852 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37853 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37854@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37855 trans_support & CFGTBL_Trans_use_short_tags);
37856
37857 /* Change the access methods to the performant access methods */
37858- h->access = SA5_performant_access;
37859+ h->access = &SA5_performant_access;
37860 h->transMethod = CFGTBL_Trans_Performant;
37861
37862 return;
37863@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37864 if (prod_index < 0)
37865 return -ENODEV;
37866 h->product_name = products[prod_index].product_name;
37867- h->access = *(products[prod_index].access);
37868+ h->access = products[prod_index].access;
37869
37870 if (cciss_board_disabled(h)) {
37871 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37872@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37873 }
37874
37875 /* make sure the board interrupts are off */
37876- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37877+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37878 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37879 if (rc)
37880 goto clean2;
37881@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37882 * fake ones to scoop up any residual completions.
37883 */
37884 spin_lock_irqsave(&h->lock, flags);
37885- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37886+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37887 spin_unlock_irqrestore(&h->lock, flags);
37888 free_irq(h->intr[h->intr_mode], h);
37889 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37890@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37891 dev_info(&h->pdev->dev, "Board READY.\n");
37892 dev_info(&h->pdev->dev,
37893 "Waiting for stale completions to drain.\n");
37894- h->access.set_intr_mask(h, CCISS_INTR_ON);
37895+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37896 msleep(10000);
37897- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37898+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37899
37900 rc = controller_reset_failed(h->cfgtable);
37901 if (rc)
37902@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37903 cciss_scsi_setup(h);
37904
37905 /* Turn the interrupts on so we can service requests */
37906- h->access.set_intr_mask(h, CCISS_INTR_ON);
37907+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37908
37909 /* Get the firmware version */
37910 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37911@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37912 kfree(flush_buf);
37913 if (return_code != IO_OK)
37914 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37915- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37916+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37917 free_irq(h->intr[h->intr_mode], h);
37918 }
37919
37920diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37921index 7fda30e..2f27946 100644
37922--- a/drivers/block/cciss.h
37923+++ b/drivers/block/cciss.h
37924@@ -101,7 +101,7 @@ struct ctlr_info
37925 /* information about each logical volume */
37926 drive_info_struct *drv[CISS_MAX_LUN];
37927
37928- struct access_method access;
37929+ struct access_method *access;
37930
37931 /* queue and queue Info */
37932 struct list_head reqQ;
37933@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37934 }
37935
37936 static struct access_method SA5_access = {
37937- SA5_submit_command,
37938- SA5_intr_mask,
37939- SA5_fifo_full,
37940- SA5_intr_pending,
37941- SA5_completed,
37942+ .submit_command = SA5_submit_command,
37943+ .set_intr_mask = SA5_intr_mask,
37944+ .fifo_full = SA5_fifo_full,
37945+ .intr_pending = SA5_intr_pending,
37946+ .command_completed = SA5_completed,
37947 };
37948
37949 static struct access_method SA5B_access = {
37950- SA5_submit_command,
37951- SA5B_intr_mask,
37952- SA5_fifo_full,
37953- SA5B_intr_pending,
37954- SA5_completed,
37955+ .submit_command = SA5_submit_command,
37956+ .set_intr_mask = SA5B_intr_mask,
37957+ .fifo_full = SA5_fifo_full,
37958+ .intr_pending = SA5B_intr_pending,
37959+ .command_completed = SA5_completed,
37960 };
37961
37962 static struct access_method SA5_performant_access = {
37963- SA5_submit_command,
37964- SA5_performant_intr_mask,
37965- SA5_fifo_full,
37966- SA5_performant_intr_pending,
37967- SA5_performant_completed,
37968+ .submit_command = SA5_submit_command,
37969+ .set_intr_mask = SA5_performant_intr_mask,
37970+ .fifo_full = SA5_fifo_full,
37971+ .intr_pending = SA5_performant_intr_pending,
37972+ .command_completed = SA5_performant_completed,
37973 };
37974
37975 struct board_type {
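
The cciss changes (mirrored below for cpqarray and the smart1,2.h tables) replace a by-value copy of struct access_method, a five-entry table of controller function pointers, with a pointer to the shared static table: h->access = products[i].access instead of h->access = *(products[i].access), and every call site goes from h->access.fn(h) to h->access->fn(h). A by-value copy leaves writable function pointers inside each heap-allocated ctlr_info; a pointer to one static table lets the constify plugin make the table itself read-only. The initializers also move to C99 designated form, which documents which slot each handler fills. Reduced illustration (field types abbreviated; not the driver's real declarations):

    struct access_method {
        void (*submit_command)(void *h);
        unsigned long (*command_completed)(void *h);
        /* ... remaining ops elided ... */
    };

    struct ctlr {
        const struct access_method *access;     /* was: struct access_method access; */
    };

    static unsigned long get_next_completion(struct ctlr *h)
    {
        return h->access->command_completed(h); /* was: h->access.command_completed(h) */
    }
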
37976diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37977index 2b94403..fd6ad1f 100644
37978--- a/drivers/block/cpqarray.c
37979+++ b/drivers/block/cpqarray.c
37980@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37981 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37982 goto Enomem4;
37983 }
37984- hba[i]->access.set_intr_mask(hba[i], 0);
37985+ hba[i]->access->set_intr_mask(hba[i], 0);
37986 if (request_irq(hba[i]->intr, do_ida_intr,
37987 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37988 {
37989@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37990 add_timer(&hba[i]->timer);
37991
37992 /* Enable IRQ now that spinlock and rate limit timer are set up */
37993- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37994+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37995
37996 for(j=0; j<NWD; j++) {
37997 struct gendisk *disk = ida_gendisk[i][j];
37998@@ -694,7 +694,7 @@ DBGINFO(
37999 for(i=0; i<NR_PRODUCTS; i++) {
38000 if (board_id == products[i].board_id) {
38001 c->product_name = products[i].product_name;
38002- c->access = *(products[i].access);
38003+ c->access = products[i].access;
38004 break;
38005 }
38006 }
38007@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38008 hba[ctlr]->intr = intr;
38009 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38010 hba[ctlr]->product_name = products[j].product_name;
38011- hba[ctlr]->access = *(products[j].access);
38012+ hba[ctlr]->access = products[j].access;
38013 hba[ctlr]->ctlr = ctlr;
38014 hba[ctlr]->board_id = board_id;
38015 hba[ctlr]->pci_dev = NULL; /* not PCI */
38016@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38017
38018 while((c = h->reqQ) != NULL) {
38019 /* Can't do anything if we're busy */
38020- if (h->access.fifo_full(h) == 0)
38021+ if (h->access->fifo_full(h) == 0)
38022 return;
38023
38024 /* Get the first entry from the request Q */
38025@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38026 h->Qdepth--;
38027
38028 /* Tell the controller to do our bidding */
38029- h->access.submit_command(h, c);
38030+ h->access->submit_command(h, c);
38031
38032 /* Get onto the completion Q */
38033 addQ(&h->cmpQ, c);
38034@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38035 unsigned long flags;
38036 __u32 a,a1;
38037
38038- istat = h->access.intr_pending(h);
38039+ istat = h->access->intr_pending(h);
38040 /* Is this interrupt for us? */
38041 if (istat == 0)
38042 return IRQ_NONE;
38043@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38044 */
38045 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38046 if (istat & FIFO_NOT_EMPTY) {
38047- while((a = h->access.command_completed(h))) {
38048+ while((a = h->access->command_completed(h))) {
38049 a1 = a; a &= ~3;
38050 if ((c = h->cmpQ) == NULL)
38051 {
38052@@ -1448,11 +1448,11 @@ static int sendcmd(
38053 /*
38054 * Disable interrupt
38055 */
38056- info_p->access.set_intr_mask(info_p, 0);
38057+ info_p->access->set_intr_mask(info_p, 0);
38058 /* Make sure there is room in the command FIFO */
38059 /* Actually it should be completely empty at this time. */
38060 for (i = 200000; i > 0; i--) {
38061- temp = info_p->access.fifo_full(info_p);
38062+ temp = info_p->access->fifo_full(info_p);
38063 if (temp != 0) {
38064 break;
38065 }
38066@@ -1465,7 +1465,7 @@ DBG(
38067 /*
38068 * Send the cmd
38069 */
38070- info_p->access.submit_command(info_p, c);
38071+ info_p->access->submit_command(info_p, c);
38072 complete = pollcomplete(ctlr);
38073
38074 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38075@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38076 * we check the new geometry. Then turn interrupts back on when
38077 * we're done.
38078 */
38079- host->access.set_intr_mask(host, 0);
38080+ host->access->set_intr_mask(host, 0);
38081 getgeometry(ctlr);
38082- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38083+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38084
38085 for(i=0; i<NWD; i++) {
38086 struct gendisk *disk = ida_gendisk[ctlr][i];
38087@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38088 /* Wait (up to 2 seconds) for a command to complete */
38089
38090 for (i = 200000; i > 0; i--) {
38091- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38092+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38093 if (done == 0) {
38094 udelay(10); /* a short fixed delay */
38095 } else
38096diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38097index be73e9d..7fbf140 100644
38098--- a/drivers/block/cpqarray.h
38099+++ b/drivers/block/cpqarray.h
38100@@ -99,7 +99,7 @@ struct ctlr_info {
38101 drv_info_t drv[NWD];
38102 struct proc_dir_entry *proc;
38103
38104- struct access_method access;
38105+ struct access_method *access;
38106
38107 cmdlist_t *reqQ;
38108 cmdlist_t *cmpQ;
38109diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38110index 434c77d..6d3219a 100644
38111--- a/drivers/block/drbd/drbd_bitmap.c
38112+++ b/drivers/block/drbd/drbd_bitmap.c
38113@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38114 submit_bio(rw, bio);
38115 /* this should not count as user activity and cause the
38116 * resync to throttle -- see drbd_rs_should_slow_down(). */
38117- atomic_add(len >> 9, &device->rs_sect_ev);
38118+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38119 }
38120 }
38121
38122diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38123index b905e98..0812ed8 100644
38124--- a/drivers/block/drbd/drbd_int.h
38125+++ b/drivers/block/drbd/drbd_int.h
38126@@ -385,7 +385,7 @@ struct drbd_epoch {
38127 struct drbd_connection *connection;
38128 struct list_head list;
38129 unsigned int barrier_nr;
38130- atomic_t epoch_size; /* increased on every request added. */
38131+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38132 atomic_t active; /* increased on every req. added, and dec on every finished. */
38133 unsigned long flags;
38134 };
38135@@ -946,7 +946,7 @@ struct drbd_device {
38136 unsigned int al_tr_number;
38137 int al_tr_cycle;
38138 wait_queue_head_t seq_wait;
38139- atomic_t packet_seq;
38140+ atomic_unchecked_t packet_seq;
38141 unsigned int peer_seq;
38142 spinlock_t peer_seq_lock;
38143 unsigned long comm_bm_set; /* communicated number of set bits. */
38144@@ -955,8 +955,8 @@ struct drbd_device {
38145 struct mutex own_state_mutex;
38146 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38147 char congestion_reason; /* Why we where congested... */
38148- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38149- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38150+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38151+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38152 int rs_last_sect_ev; /* counter to compare with */
38153 int rs_last_events; /* counter of read or write "events" (unit sectors)
38154 * on the lower level device when we last looked. */
38155diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38156index 1fc8342..7e7742b 100644
38157--- a/drivers/block/drbd/drbd_main.c
38158+++ b/drivers/block/drbd/drbd_main.c
38159@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38160 p->sector = sector;
38161 p->block_id = block_id;
38162 p->blksize = blksize;
38163- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38164+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38165 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38166 }
38167
38168@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38169 return -EIO;
38170 p->sector = cpu_to_be64(req->i.sector);
38171 p->block_id = (unsigned long)req;
38172- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38173+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38174 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38175 if (device->state.conn >= C_SYNC_SOURCE &&
38176 device->state.conn <= C_PAUSED_SYNC_T)
38177@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38178 atomic_set(&device->unacked_cnt, 0);
38179 atomic_set(&device->local_cnt, 0);
38180 atomic_set(&device->pp_in_use_by_net, 0);
38181- atomic_set(&device->rs_sect_in, 0);
38182- atomic_set(&device->rs_sect_ev, 0);
38183+ atomic_set_unchecked(&device->rs_sect_in, 0);
38184+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38185 atomic_set(&device->ap_in_flight, 0);
38186 atomic_set(&device->md_io.in_use, 0);
38187
38188@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38189 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38190 struct drbd_resource *resource = connection->resource;
38191
38192- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38193- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38194+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38195+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38196 kfree(connection->current_epoch);
38197
38198 idr_destroy(&connection->peer_devices);
38199diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38200index 74df8cf..e41fc24 100644
38201--- a/drivers/block/drbd/drbd_nl.c
38202+++ b/drivers/block/drbd/drbd_nl.c
38203@@ -3637,13 +3637,13 @@ finish:
38204
38205 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38206 {
38207- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38208+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38209 struct sk_buff *msg;
38210 struct drbd_genlmsghdr *d_out;
38211 unsigned seq;
38212 int err = -ENOMEM;
38213
38214- seq = atomic_inc_return(&drbd_genl_seq);
38215+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38216 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38217 if (!msg)
38218 goto failed;
38219diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38220index d169b4a..481463f 100644
38221--- a/drivers/block/drbd/drbd_receiver.c
38222+++ b/drivers/block/drbd/drbd_receiver.c
38223@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38224 struct drbd_device *device = peer_device->device;
38225 int err;
38226
38227- atomic_set(&device->packet_seq, 0);
38228+ atomic_set_unchecked(&device->packet_seq, 0);
38229 device->peer_seq = 0;
38230
38231 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38232@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38233 do {
38234 next_epoch = NULL;
38235
38236- epoch_size = atomic_read(&epoch->epoch_size);
38237+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38238
38239 switch (ev & ~EV_CLEANUP) {
38240 case EV_PUT:
38241@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38242 rv = FE_DESTROYED;
38243 } else {
38244 epoch->flags = 0;
38245- atomic_set(&epoch->epoch_size, 0);
38246+ atomic_set_unchecked(&epoch->epoch_size, 0);
38247 /* atomic_set(&epoch->active, 0); is already zero */
38248 if (rv == FE_STILL_LIVE)
38249 rv = FE_RECYCLED;
38250@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38251 conn_wait_active_ee_empty(connection);
38252 drbd_flush(connection);
38253
38254- if (atomic_read(&connection->current_epoch->epoch_size)) {
38255+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38256 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38257 if (epoch)
38258 break;
38259@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38260 }
38261
38262 epoch->flags = 0;
38263- atomic_set(&epoch->epoch_size, 0);
38264+ atomic_set_unchecked(&epoch->epoch_size, 0);
38265 atomic_set(&epoch->active, 0);
38266
38267 spin_lock(&connection->epoch_lock);
38268- if (atomic_read(&connection->current_epoch->epoch_size)) {
38269+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38270 list_add(&epoch->list, &connection->current_epoch->list);
38271 connection->current_epoch = epoch;
38272 connection->epochs++;
38273@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38274 list_add_tail(&peer_req->w.list, &device->sync_ee);
38275 spin_unlock_irq(&device->resource->req_lock);
38276
38277- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38278+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38279 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38280 return 0;
38281
38282@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38283 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38284 }
38285
38286- atomic_add(pi->size >> 9, &device->rs_sect_in);
38287+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38288
38289 return err;
38290 }
38291@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38292
38293 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38294 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38295- atomic_inc(&connection->current_epoch->epoch_size);
38296+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38297 err2 = drbd_drain_block(peer_device, pi->size);
38298 if (!err)
38299 err = err2;
38300@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38301
38302 spin_lock(&connection->epoch_lock);
38303 peer_req->epoch = connection->current_epoch;
38304- atomic_inc(&peer_req->epoch->epoch_size);
38305+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38306 atomic_inc(&peer_req->epoch->active);
38307 spin_unlock(&connection->epoch_lock);
38308
38309@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38310
38311 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38312 (int)part_stat_read(&disk->part0, sectors[1]) -
38313- atomic_read(&device->rs_sect_ev);
38314+ atomic_read_unchecked(&device->rs_sect_ev);
38315
38316 if (atomic_read(&device->ap_actlog_cnt)
38317 || curr_events - device->rs_last_events > 64) {
38318@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38319 device->use_csums = true;
38320 } else if (pi->cmd == P_OV_REPLY) {
38321 /* track progress, we may need to throttle */
38322- atomic_add(size >> 9, &device->rs_sect_in);
38323+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38324 peer_req->w.cb = w_e_end_ov_reply;
38325 dec_rs_pending(device);
38326 /* drbd_rs_begin_io done when we sent this request,
38327@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38328 goto out_free_e;
38329
38330 submit_for_resync:
38331- atomic_add(size >> 9, &device->rs_sect_ev);
38332+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38333
38334 submit:
38335 update_receiver_timing_details(connection, drbd_submit_peer_request);
38336@@ -4564,7 +4564,7 @@ struct data_cmd {
38337 int expect_payload;
38338 size_t pkt_size;
38339 int (*fn)(struct drbd_connection *, struct packet_info *);
38340-};
38341+} __do_const;
38342
38343 static struct data_cmd drbd_cmd_handler[] = {
38344 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38345@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38346 if (!list_empty(&connection->current_epoch->list))
38347 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38348 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38349- atomic_set(&connection->current_epoch->epoch_size, 0);
38350+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38351 connection->send.seen_any_write_yet = false;
38352
38353 drbd_info(connection, "Connection closed\n");
38354@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38355 put_ldev(device);
38356 }
38357 dec_rs_pending(device);
38358- atomic_add(blksize >> 9, &device->rs_sect_in);
38359+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38360
38361 return 0;
38362 }
38363@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38364 struct asender_cmd {
38365 size_t pkt_size;
38366 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38367-};
38368+} __do_const;
38369
38370 static struct asender_cmd asender_tbl[] = {
38371 [P_PING] = { 0, got_Ping },
38372diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38373index d0fae55..4469096 100644
38374--- a/drivers/block/drbd/drbd_worker.c
38375+++ b/drivers/block/drbd/drbd_worker.c
38376@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38377 list_add_tail(&peer_req->w.list, &device->read_ee);
38378 spin_unlock_irq(&device->resource->req_lock);
38379
38380- atomic_add(size >> 9, &device->rs_sect_ev);
38381+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38382 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38383 return 0;
38384
38385@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38386 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38387 int number, mxb;
38388
38389- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38390+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38391 device->rs_in_flight -= sect_in;
38392
38393 rcu_read_lock();
38394@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38395 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38396 struct fifo_buffer *plan;
38397
38398- atomic_set(&device->rs_sect_in, 0);
38399- atomic_set(&device->rs_sect_ev, 0);
38400+ atomic_set_unchecked(&device->rs_sect_in, 0);
38401+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38402 device->rs_in_flight = 0;
38403 device->rs_last_events =
38404 (int)part_stat_read(&disk->part0, sectors[0]) +
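
The DRBD conversions follow the same statistics-versus-refcount split introduced for the ATM drivers: packet_seq, epoch_size and the resync byte counters (rs_sect_in, rs_sect_ev) are sequencing and throughput counters that may wrap, while lifetime-relevant counts such as epoch->active stay on checked atomic_t. The one new primitive here is atomic_xchg_unchecked(), used by drbd_rs_number_requests() to read and reset the per-turn sector count atomically; a sketch in the same spirit as the earlier helpers:

    typedef struct { int counter; } atomic_unchecked_t;     /* as sketched earlier */

    static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
    {
        /* __sync builtin standing in for the arch xchg instruction */
        return __sync_lock_test_and_set(&v->counter, new);
    }
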
38405diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38406index 6cb1beb..bf490f7 100644
38407--- a/drivers/block/loop.c
38408+++ b/drivers/block/loop.c
38409@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38410
38411 file_start_write(file);
38412 set_fs(get_ds());
38413- bw = file->f_op->write(file, buf, len, &pos);
38414+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38415 set_fs(old_fs);
38416 file_end_write(file);
38417 if (likely(bw == len))
38418diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38419index d826bf3..8eb406c 100644
38420--- a/drivers/block/nvme-core.c
38421+++ b/drivers/block/nvme-core.c
38422@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38423 static struct task_struct *nvme_thread;
38424 static struct workqueue_struct *nvme_workq;
38425 static wait_queue_head_t nvme_kthread_wait;
38426-static struct notifier_block nvme_nb;
38427
38428 static void nvme_reset_failed_dev(struct work_struct *ws);
38429 static int nvme_process_cq(struct nvme_queue *nvmeq);
38430@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38431 static void __exit nvme_exit(void)
38432 {
38433 pci_unregister_driver(&nvme_driver);
38434- unregister_hotcpu_notifier(&nvme_nb);
38435 unregister_blkdev(nvme_major, "nvme");
38436 destroy_workqueue(nvme_workq);
38437 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38438diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38439index 09e628da..7607aaa 100644
38440--- a/drivers/block/pktcdvd.c
38441+++ b/drivers/block/pktcdvd.c
38442@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38443
38444 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38445 {
38446- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38447+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38448 }
38449
38450 /*
38451@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38452 return -EROFS;
38453 }
38454 pd->settings.fp = ti.fp;
38455- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38456+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38457
38458 if (ti.nwa_v) {
38459 pd->nwa = be32_to_cpu(ti.next_writable);
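/*
 * Appending 1UL makes the subtraction happen in unsigned long before
 * the cast to sector_t, instead of wrapping in the narrower type of
 * pd->settings.size; this is the kind of width mismatch the
 * size_overflow plugin used elsewhere in this patch flags. A worked
 * demo of the difference on an LP64 target, assuming a 32-bit size
 * field and a 64-bit sector_t.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
        uint32_t size = 0;                      /* degenerate zone size */
        sector_t a = ~(sector_t)(size - 1);     /* wraps in 32 bits first */
        sector_t b = ~(sector_t)(size - 1UL);   /* computed in 64 bits */

        printf("%016llx\n%016llx\n",
               (unsigned long long)a, (unsigned long long)b);
        return 0;
}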
38460diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38461index 8a86b62..f54c87e 100644
38462--- a/drivers/block/rbd.c
38463+++ b/drivers/block/rbd.c
38464@@ -63,7 +63,7 @@
38465 * If the counter is already at its maximum value returns
38466 * -EINVAL without updating it.
38467 */
38468-static int atomic_inc_return_safe(atomic_t *v)
38469+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38470 {
38471 unsigned int counter;
38472
38473diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38474index e5565fb..71be10b4 100644
38475--- a/drivers/block/smart1,2.h
38476+++ b/drivers/block/smart1,2.h
38477@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38478 }
38479
38480 static struct access_method smart4_access = {
38481- smart4_submit_command,
38482- smart4_intr_mask,
38483- smart4_fifo_full,
38484- smart4_intr_pending,
38485- smart4_completed,
38486+ .submit_command = smart4_submit_command,
38487+ .set_intr_mask = smart4_intr_mask,
38488+ .fifo_full = smart4_fifo_full,
38489+ .intr_pending = smart4_intr_pending,
38490+ .command_completed = smart4_completed,
38491 };
38492
38493 /*
38494@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38495 }
38496
38497 static struct access_method smart2_access = {
38498- smart2_submit_command,
38499- smart2_intr_mask,
38500- smart2_fifo_full,
38501- smart2_intr_pending,
38502- smart2_completed,
38503+ .submit_command = smart2_submit_command,
38504+ .set_intr_mask = smart2_intr_mask,
38505+ .fifo_full = smart2_fifo_full,
38506+ .intr_pending = smart2_intr_pending,
38507+ .command_completed = smart2_completed,
38508 };
38509
38510 /*
38511@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38512 }
38513
38514 static struct access_method smart2e_access = {
38515- smart2e_submit_command,
38516- smart2e_intr_mask,
38517- smart2e_fifo_full,
38518- smart2e_intr_pending,
38519- smart2e_completed,
38520+ .submit_command = smart2e_submit_command,
38521+ .set_intr_mask = smart2e_intr_mask,
38522+ .fifo_full = smart2e_fifo_full,
38523+ .intr_pending = smart2e_intr_pending,
38524+ .command_completed = smart2e_completed,
38525 };
38526
38527 /*
38528@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38529 }
38530
38531 static struct access_method smart1_access = {
38532- smart1_submit_command,
38533- smart1_intr_mask,
38534- smart1_fifo_full,
38535- smart1_intr_pending,
38536- smart1_completed,
38537+ .submit_command = smart1_submit_command,
38538+ .set_intr_mask = smart1_intr_mask,
38539+ .fifo_full = smart1_fifo_full,
38540+ .intr_pending = smart1_intr_pending,
38541+ .command_completed = smart1_completed,
38542 };
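/*
 * Converting the smart1,2.h tables to designated initializers binds
 * each handler to its slot by name: positional initializers silently
 * misbind if a field is ever added or reordered, and named members
 * also play better with the constify plugin used throughout this
 * patch. Minimal sketch, trimmed from the five-member original; the
 * functions are illustrative.
 */
#include <stdio.h>

struct access_method_sketch {
        void (*submit_command)(void);
        void (*set_intr_mask)(void);
};

static void do_submit(void) { puts("submit"); }
static void do_mask(void)   { puts("mask"); }

static const struct access_method_sketch m = {
        .submit_command = do_submit,
        .set_intr_mask  = do_mask,
};

int main(void)
{
        m.submit_command();
        m.set_intr_mask();
        return 0;
}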
38543diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38544index 55c135b..9f8d60c 100644
38545--- a/drivers/bluetooth/btwilink.c
38546+++ b/drivers/bluetooth/btwilink.c
38547@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38548
38549 static int bt_ti_probe(struct platform_device *pdev)
38550 {
38551- static struct ti_st *hst;
38552+ struct ti_st *hst;
38553 struct hci_dev *hdev;
38554 int err;
38555
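/*
 * Dropping the stray 'static' in bt_ti_probe() fixes a real bug class:
 * a function-local static pointer is one shared slot across every
 * probe() call, so a second device would clobber the first one's
 * state. The corrected per-call pattern in miniature; struct and ids
 * are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct ti_st_sketch { int id; };

static struct ti_st_sketch *probe(int id)
{
        /* automatic, not static: each call gets its own allocation */
        struct ti_st_sketch *hst = malloc(sizeof(*hst));

        if (hst)
                hst->id = id;
        return hst;
}

int main(void)
{
        struct ti_st_sketch *a = probe(1), *b = probe(2);

        printf("%d %d\n", a ? a->id : -1, b ? b->id : -1); /* 1 2 */
        free(a);
        free(b);
        return 0;
}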
38556diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38557index 5d28a45..a538f90 100644
38558--- a/drivers/cdrom/cdrom.c
38559+++ b/drivers/cdrom/cdrom.c
38560@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38561 ENSURE(reset, CDC_RESET);
38562 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38563 cdi->mc_flags = 0;
38564- cdo->n_minors = 0;
38565 cdi->options = CDO_USE_FFLAGS;
38566
38567 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38568@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38569 else
38570 cdi->cdda_method = CDDA_OLD;
38571
38572- if (!cdo->generic_packet)
38573- cdo->generic_packet = cdrom_dummy_generic_packet;
38574+ if (!cdo->generic_packet) {
38575+ pax_open_kernel();
38576+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38577+ pax_close_kernel();
38578+ }
38579
38580 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38581 mutex_lock(&cdrom_mutex);
38582@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38583 if (cdi->exit)
38584 cdi->exit(cdi);
38585
38586- cdi->ops->n_minors--;
38587 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38588 }
38589
38590@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38591 */
38592 nr = nframes;
38593 do {
38594- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38595+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38596 if (cgc.buffer)
38597 break;
38598
38599@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38600 struct cdrom_device_info *cdi;
38601 int ret;
38602
38603- ret = scnprintf(info + *pos, max_size - *pos, header);
38604+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38605 if (!ret)
38606 return 1;
38607
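/*
 * Two independent fixes in the cdrom hunks: kzalloc() zeroes the CD-DA
 * bounce buffer so a short device read cannot leak stale heap memory
 * to user space, and routing header through "%s" stops it from being
 * parsed as a format string. Userspace demo of the latter with
 * snprintf(), the same family as scnprintf().
 */
#include <stdio.h>

int main(void)
{
        const char *header = "100% done:";      /* stray '%' in the data */
        char buf[64];

        /* snprintf(buf, sizeof(buf), header); would misparse the '%'
         * (and gcc -Wformat-security warns about it); the fixed form: */
        snprintf(buf, sizeof(buf), "%s", header);
        puts(buf);
        return 0;
}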
38608diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38609index 584bc31..e64a12c 100644
38610--- a/drivers/cdrom/gdrom.c
38611+++ b/drivers/cdrom/gdrom.c
38612@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38613 .audio_ioctl = gdrom_audio_ioctl,
38614 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38615 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38616- .n_minors = 1,
38617 };
38618
38619 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38620diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38621index efefd12..4f1d494 100644
38622--- a/drivers/char/Kconfig
38623+++ b/drivers/char/Kconfig
38624@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38625
38626 config DEVKMEM
38627 bool "/dev/kmem virtual device support"
38628- default y
38629+ default n
38630+ depends on !GRKERNSEC_KMEM
38631 help
38632 Say Y here if you want to support the /dev/kmem device. The
38633 /dev/kmem device is rarely used, but can be used for certain
38634@@ -577,6 +578,7 @@ config DEVPORT
38635 bool
38636 depends on !M68K
38637 depends on ISA || PCI
38638+ depends on !GRKERNSEC_KMEM
38639 default y
38640
38641 source "drivers/s390/char/Kconfig"
38642diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38643index a48e05b..6bac831 100644
38644--- a/drivers/char/agp/compat_ioctl.c
38645+++ b/drivers/char/agp/compat_ioctl.c
38646@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38647 return -ENOMEM;
38648 }
38649
38650- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38651+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38652 sizeof(*usegment) * ureserve.seg_count)) {
38653 kfree(usegment);
38654 kfree(ksegment);
38655diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38656index 09f17eb..8531d2f 100644
38657--- a/drivers/char/agp/frontend.c
38658+++ b/drivers/char/agp/frontend.c
38659@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38660 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38661 return -EFAULT;
38662
38663- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38664+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38665 return -EFAULT;
38666
38667 client = agp_find_client_by_pid(reserve.pid);
38668@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38669 if (segment == NULL)
38670 return -ENOMEM;
38671
38672- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38673+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38674 sizeof(struct agp_segment) * reserve.seg_count)) {
38675 kfree(segment);
38676 return -EFAULT;
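/*
 * The agp bound is tightened to sizeof(struct agp_segment_priv),
 * plausibly because that is the larger per-segment object allocated
 * further down this path; the ~0U/sizeof() idiom itself rejects any
 * seg_count whose byte total would wrap a 32-bit multiplication.
 * Illustration of the idiom with a stand-in struct; on LP64 the
 * product below would not actually wrap, the guard targets 32-bit
 * size_t builds.
 */
#include <stdio.h>
#include <stdlib.h>

struct seg_sketch { unsigned int start, end, prot, pad[5]; }; /* 32 bytes */

static void *alloc_segments(unsigned int count)
{
        /* reject counts whose byte size would exceed 32 bits */
        if (count >= ~0U / sizeof(struct seg_sketch))
                return NULL;
        return malloc(count * sizeof(struct seg_sketch));
}

int main(void)
{
        printf("%p\n", alloc_segments(0x20000000)); /* (nil): rejected */
        return 0;
}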
38677diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38678index 4f94375..413694e 100644
38679--- a/drivers/char/genrtc.c
38680+++ b/drivers/char/genrtc.c
38681@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38682 switch (cmd) {
38683
38684 case RTC_PLL_GET:
38685+ memset(&pll, 0, sizeof(pll));
38686 if (get_rtc_pll(&pll))
38687 return -EINVAL;
38688 else
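/*
 * The added memset() closes a kernel-stack infoleak: get_rtc_pll() may
 * not fill every field (or padding byte) of the on-stack rtc_pll_info
 * before the whole struct is copied out to user space. The pattern in
 * miniature, with the struct abbreviated.
 */
#include <stdio.h>
#include <string.h>

struct pll_sketch { int pll_ctrl; int pll_value; int pll_max; };

static int get_pll(struct pll_sketch *pll)
{
        pll->pll_ctrl = 1;      /* suppose only this field is filled in */
        return 0;
}

int main(void)
{
        struct pll_sketch pll;

        /* without this, pll_value/pll_max would carry stack garbage and
         * be copied out verbatim */
        memset(&pll, 0, sizeof(pll));
        if (get_pll(&pll))
                return 1;
        printf("%d %d %d\n", pll.pll_ctrl, pll.pll_value, pll.pll_max);
        return 0;
}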
38689diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38690index d5d4cd8..22d561d 100644
38691--- a/drivers/char/hpet.c
38692+++ b/drivers/char/hpet.c
38693@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38694 }
38695
38696 static int
38697-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38698+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38699 struct hpet_info *info)
38700 {
38701 struct hpet_timer __iomem *timer;
38702diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38703index 6b65fa4..8ebbc99 100644
38704--- a/drivers/char/ipmi/ipmi_msghandler.c
38705+++ b/drivers/char/ipmi/ipmi_msghandler.c
38706@@ -436,7 +436,7 @@ struct ipmi_smi {
38707 struct proc_dir_entry *proc_dir;
38708 char proc_dir_name[10];
38709
38710- atomic_t stats[IPMI_NUM_STATS];
38711+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38712
38713 /*
38714 * run_to_completion duplicate of smb_info, smi_info
38715@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38716 static DEFINE_MUTEX(smi_watchers_mutex);
38717
38718 #define ipmi_inc_stat(intf, stat) \
38719- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38720+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38721 #define ipmi_get_stat(intf, stat) \
38722- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38723+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38724
38725 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38726 "ACPI", "SMBIOS", "PCI",
38727@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38728 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38729 init_waitqueue_head(&intf->waitq);
38730 for (i = 0; i < IPMI_NUM_STATS; i++)
38731- atomic_set(&intf->stats[i], 0);
38732+ atomic_set_unchecked(&intf->stats[i], 0);
38733
38734 intf->proc_dir = NULL;
38735
38736diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38737index 967b73a..946e94c 100644
38738--- a/drivers/char/ipmi/ipmi_si_intf.c
38739+++ b/drivers/char/ipmi/ipmi_si_intf.c
38740@@ -284,7 +284,7 @@ struct smi_info {
38741 unsigned char slave_addr;
38742
38743 /* Counters and things for the proc filesystem. */
38744- atomic_t stats[SI_NUM_STATS];
38745+ atomic_unchecked_t stats[SI_NUM_STATS];
38746
38747 struct task_struct *thread;
38748
38749@@ -293,9 +293,9 @@ struct smi_info {
38750 };
38751
38752 #define smi_inc_stat(smi, stat) \
38753- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38754+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38755 #define smi_get_stat(smi, stat) \
38756- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38757+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38758
38759 #define SI_MAX_PARMS 4
38760
38761@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38762 atomic_set(&new_smi->req_events, 0);
38763 new_smi->run_to_completion = false;
38764 for (i = 0; i < SI_NUM_STATS; i++)
38765- atomic_set(&new_smi->stats[i], 0);
38766+ atomic_set_unchecked(&new_smi->stats[i], 0);
38767
38768 new_smi->interrupt_disabled = true;
38769 atomic_set(&new_smi->need_watch, 0);
38770diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38771index 4c58333..d5cca27 100644
38772--- a/drivers/char/mem.c
38773+++ b/drivers/char/mem.c
38774@@ -18,6 +18,7 @@
38775 #include <linux/raw.h>
38776 #include <linux/tty.h>
38777 #include <linux/capability.h>
38778+#include <linux/security.h>
38779 #include <linux/ptrace.h>
38780 #include <linux/device.h>
38781 #include <linux/highmem.h>
38782@@ -36,6 +37,10 @@
38783
38784 #define DEVPORT_MINOR 4
38785
38786+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38787+extern const struct file_operations grsec_fops;
38788+#endif
38789+
38790 static inline unsigned long size_inside_page(unsigned long start,
38791 unsigned long size)
38792 {
38793@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38794
38795 while (cursor < to) {
38796 if (!devmem_is_allowed(pfn)) {
38797+#ifdef CONFIG_GRKERNSEC_KMEM
38798+ gr_handle_mem_readwrite(from, to);
38799+#else
38800 printk(KERN_INFO
38801 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38802 current->comm, from, to);
38803+#endif
38804 return 0;
38805 }
38806 cursor += PAGE_SIZE;
38807@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38808 }
38809 return 1;
38810 }
38811+#elif defined(CONFIG_GRKERNSEC_KMEM)
38812+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38813+{
38814+ return 0;
38815+}
38816 #else
38817 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38818 {
38819@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38820 #endif
38821
38822 while (count > 0) {
38823- unsigned long remaining;
38824+ unsigned long remaining = 0;
38825+ char *temp;
38826
38827 sz = size_inside_page(p, count);
38828
38829@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38830 if (!ptr)
38831 return -EFAULT;
38832
38833- remaining = copy_to_user(buf, ptr, sz);
38834+#ifdef CONFIG_PAX_USERCOPY
38835+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38836+ if (!temp) {
38837+ unxlate_dev_mem_ptr(p, ptr);
38838+ return -ENOMEM;
38839+ }
38840+ remaining = probe_kernel_read(temp, ptr, sz);
38841+#else
38842+ temp = ptr;
38843+#endif
38844+
38845+ if (!remaining)
38846+ remaining = copy_to_user(buf, temp, sz);
38847+
38848+#ifdef CONFIG_PAX_USERCOPY
38849+ kfree(temp);
38850+#endif
38851+
38852 unxlate_dev_mem_ptr(p, ptr);
38853 if (remaining)
38854 return -EFAULT;
38855@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38856 size_t count, loff_t *ppos)
38857 {
38858 unsigned long p = *ppos;
38859- ssize_t low_count, read, sz;
38860+ ssize_t low_count, read, sz, err = 0;
38861 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38862- int err = 0;
38863
38864 read = 0;
38865 if (p < (unsigned long) high_memory) {
38866@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38867 }
38868 #endif
38869 while (low_count > 0) {
38870+ char *temp;
38871+
38872 sz = size_inside_page(p, low_count);
38873
38874 /*
38875@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38876 */
38877 kbuf = xlate_dev_kmem_ptr((void *)p);
38878
38879- if (copy_to_user(buf, kbuf, sz))
38880+#ifdef CONFIG_PAX_USERCOPY
38881+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38882+ if (!temp)
38883+ return -ENOMEM;
38884+ err = probe_kernel_read(temp, kbuf, sz);
38885+#else
38886+ temp = kbuf;
38887+#endif
38888+
38889+ if (!err)
38890+ err = copy_to_user(buf, temp, sz);
38891+
38892+#ifdef CONFIG_PAX_USERCOPY
38893+ kfree(temp);
38894+#endif
38895+
38896+ if (err)
38897 return -EFAULT;
38898 buf += sz;
38899 p += sz;
38900@@ -800,6 +849,9 @@ static const struct memdev {
38901 #ifdef CONFIG_PRINTK
38902 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38903 #endif
38904+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38905+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38906+#endif
38907 };
38908
38909 static int memory_open(struct inode *inode, struct file *filp)
38910@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38911 continue;
38912
38913 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38914- NULL, devlist[minor].name);
38915+ NULL, "%s", devlist[minor].name);
38916 }
38917
38918 return tty_init();
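/*
 * Under CONFIG_PAX_USERCOPY the /dev/mem and /dev/kmem read paths
 * above bounce through a kmalloc'd buffer of exactly sz bytes:
 * probe_kernel_read() survives faulting source addresses, and the
 * hardened usercopy check can then validate the copy length against a
 * real heap object instead of an arbitrary kernel pointer. Userspace
 * analogue of the bounce; function names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long copy_out_bounced(char *dst, const char *src, size_t sz)
{
        char *temp = malloc(sz);  /* kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */

        if (!temp)
                return -1;
        memcpy(temp, src, sz);    /* probe_kernel_read() in the patch */
        memcpy(dst, temp, sz);    /* copy_to_user() in the patch */
        free(temp);
        return 0;
}

int main(void)
{
        char src[] = "device bytes", dst[32] = "";

        if (copy_out_bounced(dst, src, sizeof(src)))
                return 1;
        puts(dst);
        return 0;
}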
38919diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38920index 9df78e2..01ba9ae 100644
38921--- a/drivers/char/nvram.c
38922+++ b/drivers/char/nvram.c
38923@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38924
38925 spin_unlock_irq(&rtc_lock);
38926
38927- if (copy_to_user(buf, contents, tmp - contents))
38928+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38929 return -EFAULT;
38930
38931 *ppos = i;
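/*
 * The added 'tmp - contents > sizeof(contents)' clamp is defense in
 * depth: should the NVRAM formatting loop ever advance tmp past the
 * stack buffer, the read now fails with -EFAULT instead of copying
 * adjacent stack memory to user space. The shape of the check, with
 * illustrative names.
 */
#include <stdio.h>
#include <string.h>

static int copy_out_clamped(char *ubuf, size_t ulen,
                            const char *contents, size_t contents_sz,
                            const char *tmp)
{
        size_t len = (size_t)(tmp - contents);

        if (len > contents_sz || len > ulen)
                return -1;              /* -EFAULT in the kernel */
        memcpy(ubuf, contents, len);
        return 0;
}

int main(void)
{
        char contents[8] = "nvram", out[8];

        return copy_out_clamped(out, sizeof(out), contents,
                                sizeof(contents), contents + 5) != 0;
}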
38932diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38933index 0ea9986..e7b07e4 100644
38934--- a/drivers/char/pcmcia/synclink_cs.c
38935+++ b/drivers/char/pcmcia/synclink_cs.c
38936@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38937
38938 if (debug_level >= DEBUG_LEVEL_INFO)
38939 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38940- __FILE__, __LINE__, info->device_name, port->count);
38941+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38942
38943 if (tty_port_close_start(port, tty, filp) == 0)
38944 goto cleanup;
38945@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38946 cleanup:
38947 if (debug_level >= DEBUG_LEVEL_INFO)
38948 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38949- tty->driver->name, port->count);
38950+ tty->driver->name, atomic_read(&port->count));
38951 }
38952
38953 /* Wait until the transmitter is empty.
38954@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38955
38956 if (debug_level >= DEBUG_LEVEL_INFO)
38957 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38958- __FILE__, __LINE__, tty->driver->name, port->count);
38959+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38960
38961 /* If port is closing, signal caller to try again */
38962 if (port->flags & ASYNC_CLOSING){
38963@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38964 goto cleanup;
38965 }
38966 spin_lock(&port->lock);
38967- port->count++;
38968+ atomic_inc(&port->count);
38969 spin_unlock(&port->lock);
38970 spin_unlock_irqrestore(&info->netlock, flags);
38971
38972- if (port->count == 1) {
38973+ if (atomic_read(&port->count) == 1) {
38974 /* 1st open on this device, init hardware */
38975 retval = startup(info, tty);
38976 if (retval < 0)
38977@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38978 unsigned short new_crctype;
38979
38980 /* return error if TTY interface open */
38981- if (info->port.count)
38982+ if (atomic_read(&info->port.count))
38983 return -EBUSY;
38984
38985 switch (encoding)
38986@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
38987
38988 /* arbitrate between network and tty opens */
38989 spin_lock_irqsave(&info->netlock, flags);
38990- if (info->port.count != 0 || info->netcount != 0) {
38991+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38992 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38993 spin_unlock_irqrestore(&info->netlock, flags);
38994 return -EBUSY;
38995@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38996 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38997
38998 /* return error if TTY interface open */
38999- if (info->port.count)
39000+ if (atomic_read(&info->port.count))
39001 return -EBUSY;
39002
39003 if (cmd != SIOCWANDEV)
39004diff --git a/drivers/char/random.c b/drivers/char/random.c
39005index 9cd6968..6416f00 100644
39006--- a/drivers/char/random.c
39007+++ b/drivers/char/random.c
39008@@ -289,9 +289,6 @@
39009 /*
39010 * To allow fractional bits to be tracked, the entropy_count field is
39011 * denominated in units of 1/8th bits.
39012- *
39013- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39014- * credit_entropy_bits() needs to be 64 bits wide.
39015 */
39016 #define ENTROPY_SHIFT 3
39017 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39018@@ -439,9 +436,9 @@ struct entropy_store {
39019 };
39020
39021 static void push_to_pool(struct work_struct *work);
39022-static __u32 input_pool_data[INPUT_POOL_WORDS];
39023-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39024-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39025+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39026+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39027+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39028
39029 static struct entropy_store input_pool = {
39030 .poolinfo = &poolinfo_table[0],
39031@@ -635,7 +632,7 @@ retry:
39032 /* The +2 corresponds to the /4 in the denominator */
39033
39034 do {
39035- unsigned int anfrac = min(pnfrac, pool_size/2);
39036+ u64 anfrac = min(pnfrac, pool_size/2);
39037 unsigned int add =
39038 ((pool_size - entropy_count)*anfrac*3) >> s;
39039
39040@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39041
39042 extract_buf(r, tmp);
39043 i = min_t(int, nbytes, EXTRACT_SIZE);
39044- if (copy_to_user(buf, tmp, i)) {
39045+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39046 ret = -EFAULT;
39047 break;
39048 }
39049@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39050 static int proc_do_uuid(struct ctl_table *table, int write,
39051 void __user *buffer, size_t *lenp, loff_t *ppos)
39052 {
39053- struct ctl_table fake_table;
39054+ ctl_table_no_const fake_table;
39055 unsigned char buf[64], tmp_uuid[16], *uuid;
39056
39057 uuid = table->data;
39058@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39059 static int proc_do_entropy(struct ctl_table *table, int write,
39060 void __user *buffer, size_t *lenp, loff_t *ppos)
39061 {
39062- struct ctl_table fake_table;
39063+ ctl_table_no_const fake_table;
39064 int entropy_count;
39065
39066 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
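/*
 * Widening anfrac to u64 makes the whole credit expression
 * (pool_size - entropy_count) * anfrac * 3 >> s 64-bit, which is why
 * the deleted comment's 2*(ENTROPY_SHIFT + log2(poolbits)) <= 31
 * constraint no longer applies; __latent_entropy separately has the
 * plugin pre-seed the pool arrays. Numeric demo of the wrap the
 * widening prevents, using a pool hypothetically larger than the
 * stock 4096-bit input pool.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pool_size = 1u << 19;  /* poolbits << ENTROPY_SHIFT */
        uint32_t entropy_count = 0;
        uint32_t anfrac32 = pool_size / 2;
        uint64_t anfrac64 = pool_size / 2;

        uint32_t wrapped = (pool_size - entropy_count) * anfrac32 * 3;
        uint64_t exact   = (pool_size - entropy_count) * anfrac64 * 3;

        printf("32-bit: %u\n64-bit: %llu\n",
               wrapped, (unsigned long long)exact);
        return 0;
}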
39067diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39068index e496dae..b793e7d 100644
39069--- a/drivers/char/sonypi.c
39070+++ b/drivers/char/sonypi.c
39071@@ -54,6 +54,7 @@
39072
39073 #include <asm/uaccess.h>
39074 #include <asm/io.h>
39075+#include <asm/local.h>
39076
39077 #include <linux/sonypi.h>
39078
39079@@ -490,7 +491,7 @@ static struct sonypi_device {
39080 spinlock_t fifo_lock;
39081 wait_queue_head_t fifo_proc_list;
39082 struct fasync_struct *fifo_async;
39083- int open_count;
39084+ local_t open_count;
39085 int model;
39086 struct input_dev *input_jog_dev;
39087 struct input_dev *input_key_dev;
39088@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39089 static int sonypi_misc_release(struct inode *inode, struct file *file)
39090 {
39091 mutex_lock(&sonypi_device.lock);
39092- sonypi_device.open_count--;
39093+ local_dec(&sonypi_device.open_count);
39094 mutex_unlock(&sonypi_device.lock);
39095 return 0;
39096 }
39097@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39098 {
39099 mutex_lock(&sonypi_device.lock);
39100 /* Flush input queue on first open */
39101- if (!sonypi_device.open_count)
39102+ if (!local_read(&sonypi_device.open_count))
39103 kfifo_reset(&sonypi_device.fifo);
39104- sonypi_device.open_count++;
39105+ local_inc(&sonypi_device.open_count);
39106 mutex_unlock(&sonypi_device.lock);
39107
39108 return 0;
39109diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39110index 565a947..dcdc06e 100644
39111--- a/drivers/char/tpm/tpm_acpi.c
39112+++ b/drivers/char/tpm/tpm_acpi.c
39113@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39114 virt = acpi_os_map_iomem(start, len);
39115 if (!virt) {
39116 kfree(log->bios_event_log);
39117+ log->bios_event_log = NULL;
39118 printk("%s: ERROR - Unable to map memory\n", __func__);
39119 return -EIO;
39120 }
39121
39122- memcpy_fromio(log->bios_event_log, virt, len);
39123+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39124
39125 acpi_os_unmap_iomem(virt, len);
39126 return 0;
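/*
 * NULLing bios_event_log right after the kfree() removes a dangling
 * pointer: read_log()'s caller owns the buffer and may free it again
 * on its own error path. The discipline in miniature (free(NULL) is a
 * no-op, kfree(NULL) likewise).
 */
#include <stdlib.h>

struct bios_log_sketch { char *bios_event_log; };

static int read_log(struct bios_log_sketch *log)
{
        log->bios_event_log = malloc(64);
        if (!log->bios_event_log)
                return -1;
        /* ... the iomem mapping fails ... */
        free(log->bios_event_log);
        log->bios_event_log = NULL;     /* the line the hunk adds */
        return -1;
}

int main(void)
{
        struct bios_log_sketch log = { 0 };

        read_log(&log);
        free(log.bios_event_log);       /* safe either way now */
        return 0;
}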
39127diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39128index 3a56a13..f8cbd25 100644
39129--- a/drivers/char/tpm/tpm_eventlog.c
39130+++ b/drivers/char/tpm/tpm_eventlog.c
39131@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39132 event = addr;
39133
39134 if ((event->event_type == 0 && event->event_size == 0) ||
39135- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39136+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39137 return NULL;
39138
39139 return addr;
39140@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39141 return NULL;
39142
39143 if ((event->event_type == 0 && event->event_size == 0) ||
39144- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39145+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39146 return NULL;
39147
39148 (*pos)++;
39149@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39150 int i;
39151
39152 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39153- seq_putc(m, data[i]);
39154+ if (!seq_putc(m, data[i]))
39155+ return -EFAULT;
39156
39157 return 0;
39158 }
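/*
 * Both rewritten TPM checks keep the attacker-influenced event_size
 * out of pointer arithmetic: v + hdr + event_size can wrap and compare
 * low, while event_size >= limit - v - hdr cannot (this sketch assumes,
 * as the surrounding code arranges, that limit - v - hdr itself cannot
 * underflow). Demo of the wrap on a 64-bit target.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uintptr_t addr  = UINTPTR_MAX - 64;     /* current event */
        uintptr_t limit = UINTPTR_MAX - 8;      /* end of the mapped log */
        size_t hdr = 32;                        /* sizeof(struct tcpa_event) */
        size_t event_size = SIZE_MAX - 100;     /* untrusted length field */

        /* old form: the sum wraps around zero and compares low */
        int old_accepts = (addr + hdr + event_size >= limit) ? 0 : 1;
        /* new form from the hunk: no addition on the untrusted value */
        int new_accepts = (event_size >= limit - addr - hdr) ? 0 : 1;

        printf("old check accepts: %d (wrapped!)\nnew check accepts: %d\n",
               old_accepts, new_accepts);
        return 0;
}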
39159diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39160index c3aac4c..88de09f9 100644
39161--- a/drivers/char/virtio_console.c
39162+++ b/drivers/char/virtio_console.c
39163@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39164 if (to_user) {
39165 ssize_t ret;
39166
39167- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39168+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39169 if (ret)
39170 return -EFAULT;
39171 } else {
39172@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39173 if (!port_has_data(port) && !port->host_connected)
39174 return 0;
39175
39176- return fill_readbuf(port, ubuf, count, true);
39177+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39178 }
39179
39180 static int wait_port_writable(struct port *port, bool nonblock)
39181diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39182index 4386697..754ceca 100644
39183--- a/drivers/clk/clk-composite.c
39184+++ b/drivers/clk/clk-composite.c
39185@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39186 struct clk *clk;
39187 struct clk_init_data init;
39188 struct clk_composite *composite;
39189- struct clk_ops *clk_composite_ops;
39190+ clk_ops_no_const *clk_composite_ops;
39191
39192 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39193 if (!composite) {
39194diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39195index dd3a78c..386d49c 100644
39196--- a/drivers/clk/socfpga/clk-gate.c
39197+++ b/drivers/clk/socfpga/clk-gate.c
39198@@ -22,6 +22,7 @@
39199 #include <linux/mfd/syscon.h>
39200 #include <linux/of.h>
39201 #include <linux/regmap.h>
39202+#include <asm/pgtable.h>
39203
39204 #include "clk.h"
39205
39206@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39207 return 0;
39208 }
39209
39210-static struct clk_ops gateclk_ops = {
39211+static clk_ops_no_const gateclk_ops __read_only = {
39212 .prepare = socfpga_clk_prepare,
39213 .recalc_rate = socfpga_clk_recalc_rate,
39214 .get_parent = socfpga_clk_get_parent,
39215@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39216 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39217 socfpga_clk->hw.bit_idx = clk_gate[1];
39218
39219- gateclk_ops.enable = clk_gate_ops.enable;
39220- gateclk_ops.disable = clk_gate_ops.disable;
39221+ pax_open_kernel();
39222+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39223+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39224+ pax_close_kernel();
39225 }
39226
39227 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
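/*
 * gateclk_ops becomes __read_only, so its enable/disable pointers can
 * only be filled in inside the pax_open_kernel()/pax_close_kernel()
 * window, which briefly lifts kernel write protection. Userspace
 * analogue using mprotect(); the names and one-page layout are
 * illustrative.
 */
#include <stdio.h>
#include <sys/mman.h>

struct ops_sketch { int (*enable)(void); };

static int real_enable(void) { return 42; }

int main(void)
{
        /* one page for the ops table, writable only during setup */
        struct ops_sketch *gate_ops = mmap(NULL, 4096,
                                           PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS,
                                           -1, 0);
        if (gate_ops == MAP_FAILED)
                return 1;

        gate_ops->enable = real_enable;         /* pax_open_kernel() ... */
        mprotect(gate_ops, 4096, PROT_READ);    /* ... pax_close_kernel() */

        printf("%d\n", gate_ops->enable());     /* reads still work: 42 */
        /* gate_ops->enable = NULL; would now fault */
        return 0;
}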
39228diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39229index de6da95..c98278b 100644
39230--- a/drivers/clk/socfpga/clk-pll.c
39231+++ b/drivers/clk/socfpga/clk-pll.c
39232@@ -21,6 +21,7 @@
39233 #include <linux/io.h>
39234 #include <linux/of.h>
39235 #include <linux/of_address.h>
39236+#include <asm/pgtable.h>
39237
39238 #include "clk.h"
39239
39240@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39241 CLK_MGR_PLL_CLK_SRC_MASK;
39242 }
39243
39244-static struct clk_ops clk_pll_ops = {
39245+static clk_ops_no_const clk_pll_ops __read_only = {
39246 .recalc_rate = clk_pll_recalc_rate,
39247 .get_parent = clk_pll_get_parent,
39248 };
39249@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39250 pll_clk->hw.hw.init = &init;
39251
39252 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39253- clk_pll_ops.enable = clk_gate_ops.enable;
39254- clk_pll_ops.disable = clk_gate_ops.disable;
39255+ pax_open_kernel();
39256+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39257+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39258+ pax_close_kernel();
39259
39260 clk = clk_register(NULL, &pll_clk->hw.hw);
39261 if (WARN_ON(IS_ERR(clk))) {
39262diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39263index b0c18ed..1713a80 100644
39264--- a/drivers/cpufreq/acpi-cpufreq.c
39265+++ b/drivers/cpufreq/acpi-cpufreq.c
39266@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39267 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39268 per_cpu(acfreq_data, cpu) = data;
39269
39270- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39271- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39272+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39273+ pax_open_kernel();
39274+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39275+ pax_close_kernel();
39276+ }
39277
39278 result = acpi_processor_register_performance(data->acpi_data, cpu);
39279 if (result)
39280@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39281 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39282 break;
39283 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39284- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39285+ pax_open_kernel();
39286+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39287+ pax_close_kernel();
39288 break;
39289 default:
39290 break;
39291@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39292 if (!msrs)
39293 return;
39294
39295- acpi_cpufreq_driver.boost_supported = true;
39296- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39297+ pax_open_kernel();
39298+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39299+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39300+ pax_close_kernel();
39301
39302 cpu_notifier_register_begin();
39303
39304diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39305index fde97d6..3631eca 100644
39306--- a/drivers/cpufreq/cpufreq-dt.c
39307+++ b/drivers/cpufreq/cpufreq-dt.c
39308@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39309 if (!IS_ERR(cpu_reg))
39310 regulator_put(cpu_reg);
39311
39312- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39313+ pax_open_kernel();
39314+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39315+ pax_close_kernel();
39316
39317 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39318 if (ret)
39319diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39320index 7030c40..3a97de6 100644
39321--- a/drivers/cpufreq/cpufreq.c
39322+++ b/drivers/cpufreq/cpufreq.c
39323@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39324 }
39325
39326 mutex_lock(&cpufreq_governor_mutex);
39327- list_del(&governor->governor_list);
39328+ pax_list_del(&governor->governor_list);
39329 mutex_unlock(&cpufreq_governor_mutex);
39330 return;
39331 }
39332@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39333 return NOTIFY_OK;
39334 }
39335
39336-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39337+static struct notifier_block cpufreq_cpu_notifier = {
39338 .notifier_call = cpufreq_cpu_callback,
39339 };
39340
39341@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
39342 return 0;
39343
39344 write_lock_irqsave(&cpufreq_driver_lock, flags);
39345- cpufreq_driver->boost_enabled = state;
39346+ pax_open_kernel();
39347+ *(bool *)&cpufreq_driver->boost_enabled = state;
39348+ pax_close_kernel();
39349 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39350
39351 ret = cpufreq_driver->set_boost(state);
39352 if (ret) {
39353 write_lock_irqsave(&cpufreq_driver_lock, flags);
39354- cpufreq_driver->boost_enabled = !state;
39355+ pax_open_kernel();
39356+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39357+ pax_close_kernel();
39358 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39359
39360 pr_err("%s: Cannot %s BOOST\n",
39361@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39362
39363 pr_debug("trying to register driver %s\n", driver_data->name);
39364
39365- if (driver_data->setpolicy)
39366- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39367+ if (driver_data->setpolicy) {
39368+ pax_open_kernel();
39369+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39370+ pax_close_kernel();
39371+ }
39372
39373 write_lock_irqsave(&cpufreq_driver_lock, flags);
39374 if (cpufreq_driver) {
39375@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39376 * Check if driver provides function to enable boost -
39377 * if not, use cpufreq_boost_set_sw as default
39378 */
39379- if (!cpufreq_driver->set_boost)
39380- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39381+ if (!cpufreq_driver->set_boost) {
39382+ pax_open_kernel();
39383+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39384+ pax_close_kernel();
39385+ }
39386
39387 ret = cpufreq_sysfs_create_file(&boost.attr);
39388 if (ret) {
39389diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39390index 1b44496..b80ff5e 100644
39391--- a/drivers/cpufreq/cpufreq_governor.c
39392+++ b/drivers/cpufreq/cpufreq_governor.c
39393@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39394 struct dbs_data *dbs_data;
39395 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39396 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39397- struct od_ops *od_ops = NULL;
39398+ const struct od_ops *od_ops = NULL;
39399 struct od_dbs_tuners *od_tuners = NULL;
39400 struct cs_dbs_tuners *cs_tuners = NULL;
39401 struct cpu_dbs_common_info *cpu_cdbs;
39402@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39403
39404 if ((cdata->governor == GOV_CONSERVATIVE) &&
39405 (!policy->governor->initialized)) {
39406- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39407+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39408
39409 cpufreq_register_notifier(cs_ops->notifier_block,
39410 CPUFREQ_TRANSITION_NOTIFIER);
39411@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39412
39413 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39414 (policy->governor->initialized == 1)) {
39415- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39416+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39417
39418 cpufreq_unregister_notifier(cs_ops->notifier_block,
39419 CPUFREQ_TRANSITION_NOTIFIER);
39420diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39421index cc401d1..8197340 100644
39422--- a/drivers/cpufreq/cpufreq_governor.h
39423+++ b/drivers/cpufreq/cpufreq_governor.h
39424@@ -212,7 +212,7 @@ struct common_dbs_data {
39425 void (*exit)(struct dbs_data *dbs_data);
39426
39427 /* Governor specific ops, see below */
39428- void *gov_ops;
39429+ const void *gov_ops;
39430 };
39431
39432 /* Governor Per policy data */
39433@@ -232,7 +232,7 @@ struct od_ops {
39434 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39435 unsigned int freq_next, unsigned int relation);
39436 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39437-};
39438+} __no_const;
39439
39440 struct cs_ops {
39441 struct notifier_block *notifier_block;
39442diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39443index ad3f38f..8f086cd 100644
39444--- a/drivers/cpufreq/cpufreq_ondemand.c
39445+++ b/drivers/cpufreq/cpufreq_ondemand.c
39446@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39447
39448 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39449
39450-static struct od_ops od_ops = {
39451+static struct od_ops od_ops __read_only = {
39452 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39453 .powersave_bias_target = generic_powersave_bias_target,
39454 .freq_increase = dbs_freq_increase,
39455@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39456 (struct cpufreq_policy *, unsigned int, unsigned int),
39457 unsigned int powersave_bias)
39458 {
39459- od_ops.powersave_bias_target = f;
39460+ pax_open_kernel();
39461+ *(void **)&od_ops.powersave_bias_target = f;
39462+ pax_close_kernel();
39463 od_set_powersave_bias(powersave_bias);
39464 }
39465 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39466
39467 void od_unregister_powersave_bias_handler(void)
39468 {
39469- od_ops.powersave_bias_target = generic_powersave_bias_target;
39470+ pax_open_kernel();
39471+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39472+ pax_close_kernel();
39473 od_set_powersave_bias(0);
39474 }
39475 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39476diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39477index 742eefb..e2fcfc8 100644
39478--- a/drivers/cpufreq/intel_pstate.c
39479+++ b/drivers/cpufreq/intel_pstate.c
39480@@ -133,10 +133,10 @@ struct pstate_funcs {
39481 struct cpu_defaults {
39482 struct pstate_adjust_policy pid_policy;
39483 struct pstate_funcs funcs;
39484-};
39485+} __do_const;
39486
39487 static struct pstate_adjust_policy pid_params;
39488-static struct pstate_funcs pstate_funcs;
39489+static struct pstate_funcs *pstate_funcs;
39490 static int hwp_active;
39491
39492 struct perf_limits {
39493@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39494
39495 cpu->pstate.current_pstate = pstate;
39496
39497- pstate_funcs.set(cpu, pstate);
39498+ pstate_funcs->set(cpu, pstate);
39499 }
39500
39501 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39502 {
39503- cpu->pstate.min_pstate = pstate_funcs.get_min();
39504- cpu->pstate.max_pstate = pstate_funcs.get_max();
39505- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39506- cpu->pstate.scaling = pstate_funcs.get_scaling();
39507+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39508+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39509+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39510+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39511
39512- if (pstate_funcs.get_vid)
39513- pstate_funcs.get_vid(cpu);
39514+ if (pstate_funcs->get_vid)
39515+ pstate_funcs->get_vid(cpu);
39516 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39517 }
39518
39519@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39520 rdmsrl(MSR_IA32_APERF, aperf);
39521 rdmsrl(MSR_IA32_MPERF, mperf);
39522
39523- if (!pstate_funcs.get_max() ||
39524- !pstate_funcs.get_min() ||
39525- !pstate_funcs.get_turbo())
39526+ if (!pstate_funcs->get_max() ||
39527+ !pstate_funcs->get_min() ||
39528+ !pstate_funcs->get_turbo())
39529 return -ENODEV;
39530
39531 rdmsrl(MSR_IA32_APERF, tmp);
39532@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39533 return 0;
39534 }
39535
39536-static void copy_pid_params(struct pstate_adjust_policy *policy)
39537+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39538 {
39539 pid_params.sample_rate_ms = policy->sample_rate_ms;
39540 pid_params.p_gain_pct = policy->p_gain_pct;
39541@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39542
39543 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39544 {
39545- pstate_funcs.get_max = funcs->get_max;
39546- pstate_funcs.get_min = funcs->get_min;
39547- pstate_funcs.get_turbo = funcs->get_turbo;
39548- pstate_funcs.get_scaling = funcs->get_scaling;
39549- pstate_funcs.set = funcs->set;
39550- pstate_funcs.get_vid = funcs->get_vid;
39551+ pstate_funcs = funcs;
39552 }
39553
39554 #if IS_ENABLED(CONFIG_ACPI)
39555diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39556index 529cfd9..0e28fff 100644
39557--- a/drivers/cpufreq/p4-clockmod.c
39558+++ b/drivers/cpufreq/p4-clockmod.c
39559@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39560 case 0x0F: /* Core Duo */
39561 case 0x16: /* Celeron Core */
39562 case 0x1C: /* Atom */
39563- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39564+ pax_open_kernel();
39565+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39566+ pax_close_kernel();
39567 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39568 case 0x0D: /* Pentium M (Dothan) */
39569- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39570+ pax_open_kernel();
39571+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39572+ pax_close_kernel();
39573 /* fall through */
39574 case 0x09: /* Pentium M (Banias) */
39575 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39576@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39577
39578 /* on P-4s, the TSC runs with constant frequency independent whether
39579 * throttling is active or not. */
39580- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39581+ pax_open_kernel();
39582+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39583+ pax_close_kernel();
39584
39585 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39586 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39587diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39588index 9bb42ba..b01b4a2 100644
39589--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39590+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39591@@ -18,14 +18,12 @@
39592 #include <asm/head.h>
39593 #include <asm/timer.h>
39594
39595-static struct cpufreq_driver *cpufreq_us3_driver;
39596-
39597 struct us3_freq_percpu_info {
39598 struct cpufreq_frequency_table table[4];
39599 };
39600
39601 /* Indexed by cpu number. */
39602-static struct us3_freq_percpu_info *us3_freq_table;
39603+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39604
39605 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39606 * in the Safari config register.
39607@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39608
39609 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39610 {
39611- if (cpufreq_us3_driver)
39612- us3_freq_target(policy, 0);
39613+ us3_freq_target(policy, 0);
39614
39615 return 0;
39616 }
39617
39618+static int __init us3_freq_init(void);
39619+static void __exit us3_freq_exit(void);
39620+
39621+static struct cpufreq_driver cpufreq_us3_driver = {
39622+ .init = us3_freq_cpu_init,
39623+ .verify = cpufreq_generic_frequency_table_verify,
39624+ .target_index = us3_freq_target,
39625+ .get = us3_freq_get,
39626+ .exit = us3_freq_cpu_exit,
39627+ .name = "UltraSPARC-III",
39628+
39629+};
39630+
39631 static int __init us3_freq_init(void)
39632 {
39633 unsigned long manuf, impl, ver;
39634- int ret;
39635
39636 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39637 return -ENODEV;
39638@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39639 (impl == CHEETAH_IMPL ||
39640 impl == CHEETAH_PLUS_IMPL ||
39641 impl == JAGUAR_IMPL ||
39642- impl == PANTHER_IMPL)) {
39643- struct cpufreq_driver *driver;
39644-
39645- ret = -ENOMEM;
39646- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39647- if (!driver)
39648- goto err_out;
39649-
39650- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39651- GFP_KERNEL);
39652- if (!us3_freq_table)
39653- goto err_out;
39654-
39655- driver->init = us3_freq_cpu_init;
39656- driver->verify = cpufreq_generic_frequency_table_verify;
39657- driver->target_index = us3_freq_target;
39658- driver->get = us3_freq_get;
39659- driver->exit = us3_freq_cpu_exit;
39660- strcpy(driver->name, "UltraSPARC-III");
39661-
39662- cpufreq_us3_driver = driver;
39663- ret = cpufreq_register_driver(driver);
39664- if (ret)
39665- goto err_out;
39666-
39667- return 0;
39668-
39669-err_out:
39670- if (driver) {
39671- kfree(driver);
39672- cpufreq_us3_driver = NULL;
39673- }
39674- kfree(us3_freq_table);
39675- us3_freq_table = NULL;
39676- return ret;
39677- }
39678+ impl == PANTHER_IMPL))
39679+ return cpufreq_register_driver(&cpufreq_us3_driver);
39680
39681 return -ENODEV;
39682 }
39683
39684 static void __exit us3_freq_exit(void)
39685 {
39686- if (cpufreq_us3_driver) {
39687- cpufreq_unregister_driver(cpufreq_us3_driver);
39688- kfree(cpufreq_us3_driver);
39689- cpufreq_us3_driver = NULL;
39690- kfree(us3_freq_table);
39691- us3_freq_table = NULL;
39692- }
39693+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39694 }
39695
39696 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
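/*
 * Replacing the kmalloc'd, field-by-field-assembled cpufreq_driver
 * with a static definition takes the function pointers off the
 * writable heap (where the constify machinery cannot protect them)
 * and collapses the init/exit error handling; the per-CPU table
 * likewise becomes a fixed NR_CPUS array. The registration shape in
 * miniature; names are illustrative.
 */
#include <stdio.h>

struct driver_sketch {
        const char *name;
        int (*init)(void);
};

static int us3_init_sketch(void) { return 0; }

static struct driver_sketch us3_driver = {
        .name = "UltraSPARC-III",
        .init = us3_init_sketch,
};

static int register_driver(struct driver_sketch *d)
{
        printf("registered %s\n", d->name);
        return d->init();
}

int main(void)
{
        return register_driver(&us3_driver);
}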
39697diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39698index 7d4a315..21bb886 100644
39699--- a/drivers/cpufreq/speedstep-centrino.c
39700+++ b/drivers/cpufreq/speedstep-centrino.c
39701@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39702 !cpu_has(cpu, X86_FEATURE_EST))
39703 return -ENODEV;
39704
39705- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39706- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39707+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39708+ pax_open_kernel();
39709+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39710+ pax_close_kernel();
39711+ }
39712
39713 if (policy->cpu != 0)
39714 return -ENODEV;
39715diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39716index 2697e87..c32476c 100644
39717--- a/drivers/cpuidle/driver.c
39718+++ b/drivers/cpuidle/driver.c
39719@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39720
39721 static void poll_idle_init(struct cpuidle_driver *drv)
39722 {
39723- struct cpuidle_state *state = &drv->states[0];
39724+ cpuidle_state_no_const *state = &drv->states[0];
39725
39726 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39727 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39728diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39729index fb9f511..213e6cc 100644
39730--- a/drivers/cpuidle/governor.c
39731+++ b/drivers/cpuidle/governor.c
39732@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39733 mutex_lock(&cpuidle_lock);
39734 if (__cpuidle_find_governor(gov->name) == NULL) {
39735 ret = 0;
39736- list_add_tail(&gov->governor_list, &cpuidle_governors);
39737+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39738 if (!cpuidle_curr_governor ||
39739 cpuidle_curr_governor->rating < gov->rating)
39740 cpuidle_switch_governor(gov);
39741diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39742index 97c5903..023ad23 100644
39743--- a/drivers/cpuidle/sysfs.c
39744+++ b/drivers/cpuidle/sysfs.c
39745@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39746 NULL
39747 };
39748
39749-static struct attribute_group cpuidle_attr_group = {
39750+static attribute_group_no_const cpuidle_attr_group = {
39751 .attrs = cpuidle_default_attrs,
39752 .name = "cpuidle",
39753 };
39754diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39755index 8d2a772..33826c9 100644
39756--- a/drivers/crypto/hifn_795x.c
39757+++ b/drivers/crypto/hifn_795x.c
39758@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39759 MODULE_PARM_DESC(hifn_pll_ref,
39760 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39761
39762-static atomic_t hifn_dev_number;
39763+static atomic_unchecked_t hifn_dev_number;
39764
39765 #define ACRYPTO_OP_DECRYPT 0
39766 #define ACRYPTO_OP_ENCRYPT 1
39767@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39768 goto err_out_disable_pci_device;
39769
39770 snprintf(name, sizeof(name), "hifn%d",
39771- atomic_inc_return(&hifn_dev_number)-1);
39772+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39773
39774 err = pci_request_regions(pdev, name);
39775 if (err)
39776diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39777index 30b538d8..1610d75 100644
39778--- a/drivers/devfreq/devfreq.c
39779+++ b/drivers/devfreq/devfreq.c
39780@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39781 goto err_out;
39782 }
39783
39784- list_add(&governor->node, &devfreq_governor_list);
39785+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39786
39787 list_for_each_entry(devfreq, &devfreq_list, node) {
39788 int ret = 0;
39789@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39790 }
39791 }
39792
39793- list_del(&governor->node);
39794+ pax_list_del((struct list_head *)&governor->node);
39795 err_out:
39796 mutex_unlock(&devfreq_list_lock);
39797
39798diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39799index 3a2adb1..b3be9a3 100644
39800--- a/drivers/dma/sh/shdma-base.c
39801+++ b/drivers/dma/sh/shdma-base.c
39802@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39803 schan->slave_id = -EINVAL;
39804 }
39805
39806- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39807- sdev->desc_size, GFP_KERNEL);
39808+ schan->desc = kcalloc(sdev->desc_size,
39809+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39810 if (!schan->desc) {
39811 ret = -ENOMEM;
39812 goto edescalloc;
39813diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39814index aec8a84..7b45a1f 100644
39815--- a/drivers/dma/sh/shdmac.c
39816+++ b/drivers/dma/sh/shdmac.c
39817@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39818 return ret;
39819 }
39820
39821-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39822+static struct notifier_block sh_dmae_nmi_notifier = {
39823 .notifier_call = sh_dmae_nmi_handler,
39824
39825 /* Run before NMI debug handler and KGDB */
39826diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39827index 592af5f..bb1d583 100644
39828--- a/drivers/edac/edac_device.c
39829+++ b/drivers/edac/edac_device.c
39830@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39831 */
39832 int edac_device_alloc_index(void)
39833 {
39834- static atomic_t device_indexes = ATOMIC_INIT(0);
39835+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39836
39837- return atomic_inc_return(&device_indexes) - 1;
39838+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39839 }
39840 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39841
39842diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39843index 670d282..6675f4d 100644
39844--- a/drivers/edac/edac_mc_sysfs.c
39845+++ b/drivers/edac/edac_mc_sysfs.c
39846@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39847 struct dev_ch_attribute {
39848 struct device_attribute attr;
39849 int channel;
39850-};
39851+} __do_const;
39852
39853 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39854 struct dev_ch_attribute dev_attr_legacy_##_name = \
39855@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39856 }
39857
39858 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39859+ pax_open_kernel();
39860 if (mci->get_sdram_scrub_rate) {
39861- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39862- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39863+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39864+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39865 }
39866 if (mci->set_sdram_scrub_rate) {
39867- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39868- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39869+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39870+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39871 }
39872+ pax_close_kernel();
39873 err = device_create_file(&mci->dev,
39874 &dev_attr_sdram_scrub_rate);
39875 if (err) {
39876diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39877index 2cf44b4d..6dd2dc7 100644
39878--- a/drivers/edac/edac_pci.c
39879+++ b/drivers/edac/edac_pci.c
39880@@ -29,7 +29,7 @@
39881
39882 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39883 static LIST_HEAD(edac_pci_list);
39884-static atomic_t pci_indexes = ATOMIC_INIT(0);
39885+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39886
39887 /*
39888 * edac_pci_alloc_ctl_info
39889@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39890 */
39891 int edac_pci_alloc_index(void)
39892 {
39893- return atomic_inc_return(&pci_indexes) - 1;
39894+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39895 }
39896 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39897
39898diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39899index 24d877f..4e30133 100644
39900--- a/drivers/edac/edac_pci_sysfs.c
39901+++ b/drivers/edac/edac_pci_sysfs.c
39902@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39903 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39904 static int edac_pci_poll_msec = 1000; /* one second workq period */
39905
39906-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39907-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39908+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39909+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39910
39911 static struct kobject *edac_pci_top_main_kobj;
39912 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39913@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39914 void *value;
39915 ssize_t(*show) (void *, char *);
39916 ssize_t(*store) (void *, const char *, size_t);
39917-};
39918+} __do_const;
39919
39920 /* Set of show/store abstract level functions for PCI Parity object */
39921 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39922@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39923 edac_printk(KERN_CRIT, EDAC_PCI,
39924 "Signaled System Error on %s\n",
39925 pci_name(dev));
39926- atomic_inc(&pci_nonparity_count);
39927+ atomic_inc_unchecked(&pci_nonparity_count);
39928 }
39929
39930 if (status & (PCI_STATUS_PARITY)) {
39931@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39932 "Master Data Parity Error on %s\n",
39933 pci_name(dev));
39934
39935- atomic_inc(&pci_parity_count);
39936+ atomic_inc_unchecked(&pci_parity_count);
39937 }
39938
39939 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39940@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39941 "Detected Parity Error on %s\n",
39942 pci_name(dev));
39943
39944- atomic_inc(&pci_parity_count);
39945+ atomic_inc_unchecked(&pci_parity_count);
39946 }
39947 }
39948
39949@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39950 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39951 "Signaled System Error on %s\n",
39952 pci_name(dev));
39953- atomic_inc(&pci_nonparity_count);
39954+ atomic_inc_unchecked(&pci_nonparity_count);
39955 }
39956
39957 if (status & (PCI_STATUS_PARITY)) {
39958@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39959 "Master Data Parity Error on "
39960 "%s\n", pci_name(dev));
39961
39962- atomic_inc(&pci_parity_count);
39963+ atomic_inc_unchecked(&pci_parity_count);
39964 }
39965
39966 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39967@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39968 "Detected Parity Error on %s\n",
39969 pci_name(dev));
39970
39971- atomic_inc(&pci_parity_count);
39972+ atomic_inc_unchecked(&pci_parity_count);
39973 }
39974 }
39975 }
39976@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
39977 if (!check_pci_errors)
39978 return;
39979
39980- before_count = atomic_read(&pci_parity_count);
39981+ before_count = atomic_read_unchecked(&pci_parity_count);
39982
39983 /* scan all PCI devices looking for a Parity Error on devices and
39984 * bridges.
39985@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
39986 /* Only if operator has selected panic on PCI Error */
39987 if (edac_pci_get_panic_on_pe()) {
39988 /* If the count is different 'after' from 'before' */
39989- if (before_count != atomic_read(&pci_parity_count))
39990+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39991 panic("EDAC: PCI Parity Error");
39992 }
39993 }
39994diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39995index c2359a1..8bd119d 100644
39996--- a/drivers/edac/mce_amd.h
39997+++ b/drivers/edac/mce_amd.h
39998@@ -74,7 +74,7 @@ struct amd_decoder_ops {
39999 bool (*mc0_mce)(u16, u8);
40000 bool (*mc1_mce)(u16, u8);
40001 bool (*mc2_mce)(u16, u8);
40002-};
40003+} __no_const;
40004
40005 void amd_report_gart_errors(bool);
40006 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40007diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40008index 57ea7f4..af06b76 100644
40009--- a/drivers/firewire/core-card.c
40010+++ b/drivers/firewire/core-card.c
40011@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40012 const struct fw_card_driver *driver,
40013 struct device *device)
40014 {
40015- static atomic_t index = ATOMIC_INIT(-1);
40016+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40017
40018- card->index = atomic_inc_return(&index);
40019+ card->index = atomic_inc_return_unchecked(&index);
40020 card->driver = driver;
40021 card->device = device;
40022 card->current_tlabel = 0;
40023@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40024
40025 void fw_core_remove_card(struct fw_card *card)
40026 {
40027- struct fw_card_driver dummy_driver = dummy_driver_template;
40028+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40029
40030 card->driver->update_phy_reg(card, 4,
40031 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40032diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40033index f9e3aee..269dbdb 100644
40034--- a/drivers/firewire/core-device.c
40035+++ b/drivers/firewire/core-device.c
40036@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40037 struct config_rom_attribute {
40038 struct device_attribute attr;
40039 u32 key;
40040-};
40041+} __do_const;
40042
40043 static ssize_t show_immediate(struct device *dev,
40044 struct device_attribute *dattr, char *buf)
40045diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40046index eb6935c..3cc2bfa 100644
40047--- a/drivers/firewire/core-transaction.c
40048+++ b/drivers/firewire/core-transaction.c
40049@@ -38,6 +38,7 @@
40050 #include <linux/timer.h>
40051 #include <linux/types.h>
40052 #include <linux/workqueue.h>
40053+#include <linux/sched.h>
40054
40055 #include <asm/byteorder.h>
40056
40057diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40058index e1480ff6..1a429bd 100644
40059--- a/drivers/firewire/core.h
40060+++ b/drivers/firewire/core.h
40061@@ -111,6 +111,7 @@ struct fw_card_driver {
40062
40063 int (*stop_iso)(struct fw_iso_context *ctx);
40064 };
40065+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40066
40067 void fw_card_initialize(struct fw_card *card,
40068 const struct fw_card_driver *driver, struct device *device);
40069diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40070index aff9018..fc87ded 100644
40071--- a/drivers/firewire/ohci.c
40072+++ b/drivers/firewire/ohci.c
40073@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
40074 be32_to_cpu(ohci->next_header));
40075 }
40076
40077+#ifndef CONFIG_GRKERNSEC
40078 if (param_remote_dma) {
40079 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40080 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40081 }
40082+#endif
40083
40084 spin_unlock_irq(&ohci->lock);
40085
40086@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40087 unsigned long flags;
40088 int n, ret = 0;
40089
40090+#ifndef CONFIG_GRKERNSEC
40091 if (param_remote_dma)
40092 return 0;
40093+#endif
40094
40095 /*
40096 * FIXME: Make sure this bitmask is cleared when we clear the busReset
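The two ohci.c hunks compile out OHCI-1394 physical DMA whenever CONFIG_GRKERNSEC is set: opening the PhyReqFilter registers lets any bus peer read and write host memory directly, a classic DMA-attack vector, so grsecurity refuses to honor the remote_dma parameter at all. The policy reduces to something like the sketch below; param_remote_dma is the driver's real module parameter, while the helper itself is invented.

/* Hypothetical helper illustrating the compile-time policy above. */
static bool remote_dma_allowed(void)
{
#ifdef CONFIG_GRKERNSEC
        return false;                /* never open the physical request filters */
#else
        return param_remote_dma;     /* otherwise honor the module parameter */
#endif
}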
40097diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40098index 94a58a0..f5eba42 100644
40099--- a/drivers/firmware/dmi-id.c
40100+++ b/drivers/firmware/dmi-id.c
40101@@ -16,7 +16,7 @@
40102 struct dmi_device_attribute{
40103 struct device_attribute dev_attr;
40104 int field;
40105-};
40106+} __do_const;
40107 #define to_dmi_dev_attr(_dev_attr) \
40108 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40109
40110diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40111index 69fac06..820f0c9a 100644
40112--- a/drivers/firmware/dmi_scan.c
40113+++ b/drivers/firmware/dmi_scan.c
40114@@ -901,7 +901,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40115 if (buf == NULL)
40116 return -1;
40117
40118- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40119+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40120
40121 dmi_unmap(buf);
40122 return 0;
40123diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40124index 4fd9961..52d60ce 100644
40125--- a/drivers/firmware/efi/cper.c
40126+++ b/drivers/firmware/efi/cper.c
40127@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40128 */
40129 u64 cper_next_record_id(void)
40130 {
40131- static atomic64_t seq;
40132+ static atomic64_unchecked_t seq;
40133
40134- if (!atomic64_read(&seq))
40135- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40136+ if (!atomic64_read_unchecked(&seq))
40137+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40138
40139- return atomic64_inc_return(&seq);
40140+ return atomic64_inc_return_unchecked(&seq);
40141 }
40142 EXPORT_SYMBOL_GPL(cper_next_record_id);
40143
40144diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40145index 9035c1b..aff45f8 100644
40146--- a/drivers/firmware/efi/efi.c
40147+++ b/drivers/firmware/efi/efi.c
40148@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40149 };
40150
40151 static struct efivars generic_efivars;
40152-static struct efivar_operations generic_ops;
40153+static efivar_operations_no_const generic_ops __read_only;
40154
40155 static int generic_ops_register(void)
40156 {
40157- generic_ops.get_variable = efi.get_variable;
40158- generic_ops.set_variable = efi.set_variable;
40159- generic_ops.get_next_variable = efi.get_next_variable;
40160- generic_ops.query_variable_store = efi_query_variable_store;
40161+ pax_open_kernel();
40162+ *(void **)&generic_ops.get_variable = efi.get_variable;
40163+ *(void **)&generic_ops.set_variable = efi.set_variable;
40164+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40165+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40166+ pax_close_kernel();
40167
40168 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40169 }
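Where a constified structure must stay writable in exactly one place, the patch pairs the plugin with a *_no_const typedef: the type keeps its const-by-default treatment everywhere, while the single mutable instance (generic_ops above, further protected by __read_only plus the pax_open_kernel() window) is declared through the __no_const alias. The pattern, sketched with hypothetical names (__no_const and __read_only are PaX annotations):

/* Sketch of the __no_const escape hatch. */
struct hyp_ops {
        int (*get)(void);
        int (*set)(int);
};      /* constified by the plugin: it holds only function pointers */

typedef struct hyp_ops __no_const hyp_ops_no_const;

static hyp_ops_no_const runtime_ops __read_only;    /* the one mutable copy */

static void hyp_ops_register(int (*get)(void), int (*set)(int))
{
        pax_open_kernel();
        *(void **)&runtime_ops.get = get;
        *(void **)&runtime_ops.set = set;
        pax_close_kernel();
}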
40170diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40171index f256ecd..387dcb1 100644
40172--- a/drivers/firmware/efi/efivars.c
40173+++ b/drivers/firmware/efi/efivars.c
40174@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40175 static int
40176 create_efivars_bin_attributes(void)
40177 {
40178- struct bin_attribute *attr;
40179+ bin_attribute_no_const *attr;
40180 int error;
40181
40182 /* new_var */
40183diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40184index 2f569aa..c95f4fb 100644
40185--- a/drivers/firmware/google/memconsole.c
40186+++ b/drivers/firmware/google/memconsole.c
40187@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40188 if (!found_memconsole())
40189 return -ENODEV;
40190
40191- memconsole_bin_attr.size = memconsole_length;
40192+ pax_open_kernel();
40193+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40194+ pax_close_kernel();
40195+
40196 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40197 }
40198
40199diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40200index 3cfcfc6..09d6f117 100644
40201--- a/drivers/gpio/gpio-em.c
40202+++ b/drivers/gpio/gpio-em.c
40203@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40204 struct em_gio_priv *p;
40205 struct resource *io[2], *irq[2];
40206 struct gpio_chip *gpio_chip;
40207- struct irq_chip *irq_chip;
40208+ irq_chip_no_const *irq_chip;
40209 const char *name = dev_name(&pdev->dev);
40210 int ret;
40211
40212diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40213index 7818cd1..1be40e5 100644
40214--- a/drivers/gpio/gpio-ich.c
40215+++ b/drivers/gpio/gpio-ich.c
40216@@ -94,7 +94,7 @@ struct ichx_desc {
40217 * this option allows driver caching written output values
40218 */
40219 bool use_outlvl_cache;
40220-};
40221+} __do_const;
40222
40223 static struct {
40224 spinlock_t lock;
40225diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40226index f476ae2..05e1bdd 100644
40227--- a/drivers/gpio/gpio-omap.c
40228+++ b/drivers/gpio/gpio-omap.c
40229@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40230 const struct omap_gpio_platform_data *pdata;
40231 struct resource *res;
40232 struct gpio_bank *bank;
40233- struct irq_chip *irqc;
40234+ irq_chip_no_const *irqc;
40235 int ret;
40236
40237 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40238diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40239index 584484e..e26ebd6 100644
40240--- a/drivers/gpio/gpio-rcar.c
40241+++ b/drivers/gpio/gpio-rcar.c
40242@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40243 struct gpio_rcar_priv *p;
40244 struct resource *io, *irq;
40245 struct gpio_chip *gpio_chip;
40246- struct irq_chip *irq_chip;
40247+ irq_chip_no_const *irq_chip;
40248 struct device *dev = &pdev->dev;
40249 const char *name = dev_name(dev);
40250 int ret;
40251diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40252index c1caa45..f0f97d2 100644
40253--- a/drivers/gpio/gpio-vr41xx.c
40254+++ b/drivers/gpio/gpio-vr41xx.c
40255@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40256 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40257 maskl, pendl, maskh, pendh);
40258
40259- atomic_inc(&irq_err_count);
40260+ atomic_inc_unchecked(&irq_err_count);
40261
40262 return -EINVAL;
40263 }
40264diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40265index 568aa2b..d1204d8 100644
40266--- a/drivers/gpio/gpiolib.c
40267+++ b/drivers/gpio/gpiolib.c
40268@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40269 }
40270
40271 if (gpiochip->irqchip) {
40272- gpiochip->irqchip->irq_request_resources = NULL;
40273- gpiochip->irqchip->irq_release_resources = NULL;
40274+ pax_open_kernel();
40275+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40276+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40277+ pax_close_kernel();
40278 gpiochip->irqchip = NULL;
40279 }
40280 }
40281@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40282 gpiochip->irqchip = NULL;
40283 return -EINVAL;
40284 }
40285- irqchip->irq_request_resources = gpiochip_irq_reqres;
40286- irqchip->irq_release_resources = gpiochip_irq_relres;
40287+
40288+ pax_open_kernel();
40289+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40290+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40291+ pax_close_kernel();
40292
40293 /*
40294 * Prepare the mapping since the irqchip shall be orthogonal to
40295diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40296index 29168fa..c9baec6 100644
40297--- a/drivers/gpu/drm/drm_crtc.c
40298+++ b/drivers/gpu/drm/drm_crtc.c
40299@@ -3964,7 +3964,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40300 goto done;
40301 }
40302
40303- if (copy_to_user(&enum_ptr[copied].name,
40304+ if (copy_to_user(enum_ptr[copied].name,
40305 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40306 ret = -EFAULT;
40307 goto done;
40308diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40309index 4f41377..ee33f40 100644
40310--- a/drivers/gpu/drm/drm_drv.c
40311+++ b/drivers/gpu/drm/drm_drv.c
40312@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40313
40314 drm_device_set_unplugged(dev);
40315
40316- if (dev->open_count == 0) {
40317+ if (local_read(&dev->open_count) == 0) {
40318 drm_put_dev(dev);
40319 }
40320 mutex_unlock(&drm_global_mutex);
40321diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40322index 0b9514b..6acd174 100644
40323--- a/drivers/gpu/drm/drm_fops.c
40324+++ b/drivers/gpu/drm/drm_fops.c
40325@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40326 return PTR_ERR(minor);
40327
40328 dev = minor->dev;
40329- if (!dev->open_count++)
40330+ if (local_inc_return(&dev->open_count) == 1)
40331 need_setup = 1;
40332
40333 /* share address_space across all char-devs of a single device */
40334@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40335 return 0;
40336
40337 err_undo:
40338- dev->open_count--;
40339+ local_dec(&dev->open_count);
40340 drm_minor_release(minor);
40341 return retcode;
40342 }
40343@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40344
40345 mutex_lock(&drm_global_mutex);
40346
40347- DRM_DEBUG("open_count = %d\n", dev->open_count);
40348+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40349
40350 mutex_lock(&dev->struct_mutex);
40351 list_del(&file_priv->lhead);
40352@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40353 * Begin inline drm_release
40354 */
40355
40356- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40357+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40358 task_pid_nr(current),
40359 (long)old_encode_dev(file_priv->minor->kdev->devt),
40360- dev->open_count);
40361+ local_read(&dev->open_count));
40362
40363 /* Release any auth tokens that might point to this file_priv,
40364 (do that under the drm_global_mutex) */
40365@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40366 * End inline drm_release
40367 */
40368
40369- if (!--dev->open_count) {
40370+ if (local_dec_and_test(&dev->open_count)) {
40371 retcode = drm_lastclose(dev);
40372 if (drm_device_is_unplugged(dev))
40373 drm_put_dev(dev);
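dev->open_count is converted from a plain int to a local_t here and in every driver that peeks at it (i915, imx, nouveau and radeon below), so the ++/-- in open/release become closed atomic read-modify-write operations rather than racy integer arithmetic; the %d format specifiers become %ld because local_read() returns long. A condensed sketch of the converted lifecycle, assuming open_count is a local_t in struct drm_device (hyp_* helpers are invented):

/* Sketch of local_t open/release accounting (<asm/local.h>). */
static int hyp_open(struct drm_device *dev)
{
        if (local_inc_return(&dev->open_count) == 1)
                return hyp_first_open_setup(dev);   /* hypothetical */
        return 0;
}

static void hyp_release(struct drm_device *dev)
{
        if (local_dec_and_test(&dev->open_count))
                hyp_last_close(dev);                /* hypothetical */
}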
40374diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40375index 3d2e91c..d31c4c9 100644
40376--- a/drivers/gpu/drm/drm_global.c
40377+++ b/drivers/gpu/drm/drm_global.c
40378@@ -36,7 +36,7 @@
40379 struct drm_global_item {
40380 struct mutex mutex;
40381 void *object;
40382- int refcount;
40383+ atomic_t refcount;
40384 };
40385
40386 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40387@@ -49,7 +49,7 @@ void drm_global_init(void)
40388 struct drm_global_item *item = &glob[i];
40389 mutex_init(&item->mutex);
40390 item->object = NULL;
40391- item->refcount = 0;
40392+ atomic_set(&item->refcount, 0);
40393 }
40394 }
40395
40396@@ -59,7 +59,7 @@ void drm_global_release(void)
40397 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40398 struct drm_global_item *item = &glob[i];
40399 BUG_ON(item->object != NULL);
40400- BUG_ON(item->refcount != 0);
40401+ BUG_ON(atomic_read(&item->refcount) != 0);
40402 }
40403 }
40404
40405@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40406 struct drm_global_item *item = &glob[ref->global_type];
40407
40408 mutex_lock(&item->mutex);
40409- if (item->refcount == 0) {
40410+ if (atomic_read(&item->refcount) == 0) {
40411 item->object = kzalloc(ref->size, GFP_KERNEL);
40412 if (unlikely(item->object == NULL)) {
40413 ret = -ENOMEM;
40414@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40415 goto out_err;
40416
40417 }
40418- ++item->refcount;
40419+ atomic_inc(&item->refcount);
40420 ref->object = item->object;
40421 mutex_unlock(&item->mutex);
40422 return 0;
40423@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40424 struct drm_global_item *item = &glob[ref->global_type];
40425
40426 mutex_lock(&item->mutex);
40427- BUG_ON(item->refcount == 0);
40428+ BUG_ON(atomic_read(&item->refcount) == 0);
40429 BUG_ON(ref->object != item->object);
40430- if (--item->refcount == 0) {
40431+ if (atomic_dec_and_test(&item->refcount)) {
40432 ref->release(ref);
40433 item->object = NULL;
40434 }
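drm_global's item->refcount was a bare int, safe only while every touch happens under item->mutex; switching it to atomic_t makes the count self-consistent even if a path ever reads it outside the lock, and under PaX REFCOUNT it also gains overflow detection. The resulting shape, sketched:

/* Sketch: atomic refcount alongside the existing mutex;
 * hyp_item_release() is a hypothetical cleanup hook. */
struct hyp_item {
        struct mutex mutex;
        atomic_t refcount;
};

static void hyp_item_unref(struct hyp_item *item)
{
        mutex_lock(&item->mutex);
        BUG_ON(atomic_read(&item->refcount) == 0);
        if (atomic_dec_and_test(&item->refcount))
                hyp_item_release(item);
        mutex_unlock(&item->mutex);
}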
40435diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40436index 51efebd..2b70935 100644
40437--- a/drivers/gpu/drm/drm_info.c
40438+++ b/drivers/gpu/drm/drm_info.c
40439@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40440 struct drm_local_map *map;
40441 struct drm_map_list *r_list;
40442
40443- /* Hardcoded from _DRM_FRAME_BUFFER,
40444- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40445- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40446- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40447+ static const char * const types[] = {
40448+ [_DRM_FRAME_BUFFER] = "FB",
40449+ [_DRM_REGISTERS] = "REG",
40450+ [_DRM_SHM] = "SHM",
40451+ [_DRM_AGP] = "AGP",
40452+ [_DRM_SCATTER_GATHER] = "SG",
40453+ [_DRM_CONSISTENT] = "PCI"};
40454 const char *type;
40455 int i;
40456
40457@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40458 map = r_list->map;
40459 if (!map)
40460 continue;
40461- if (map->type < 0 || map->type > 5)
40462+ if (map->type >= ARRAY_SIZE(types))
40463 type = "??";
40464 else
40465 type = types[map->type];
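The drm_info.c hunk replaces a positionally ordered name table and a hand-maintained "type > 5" bound with designated initializers keyed on the _DRM_* enum plus an ARRAY_SIZE() check, so the table can never silently drift out of sync with the enum. The same hardening in miniature:

/* Sketch: index a lookup table by enum value and bound it with
 * ARRAY_SIZE() instead of a magic constant; names are invented. */
enum hyp_map_type { HYP_FB, HYP_REG, HYP_SHM };

static const char * const hyp_type_names[] = {
        [HYP_FB]  = "FB",
        [HYP_REG] = "REG",
        [HYP_SHM] = "SHM",
};

static const char *hyp_type_name(unsigned int type)
{
        if (type >= ARRAY_SIZE(hyp_type_names) || !hyp_type_names[type])
                return "??";            /* out of range or a gap in the table */
        return hyp_type_names[type];
}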
40466diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40467index 2f4c4343..dd12cd2 100644
40468--- a/drivers/gpu/drm/drm_ioc32.c
40469+++ b/drivers/gpu/drm/drm_ioc32.c
40470@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40471 request = compat_alloc_user_space(nbytes);
40472 if (!access_ok(VERIFY_WRITE, request, nbytes))
40473 return -EFAULT;
40474- list = (struct drm_buf_desc *) (request + 1);
40475+ list = (struct drm_buf_desc __user *) (request + 1);
40476
40477 if (__put_user(count, &request->count)
40478 || __put_user(list, &request->list))
40479@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40480 request = compat_alloc_user_space(nbytes);
40481 if (!access_ok(VERIFY_WRITE, request, nbytes))
40482 return -EFAULT;
40483- list = (struct drm_buf_pub *) (request + 1);
40484+ list = (struct drm_buf_pub __user *) (request + 1);
40485
40486 if (__put_user(count, &request->count)
40487 || __put_user(list, &request->list))
40488@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40489 return 0;
40490 }
40491
40492-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40493+drm_ioctl_compat_t drm_compat_ioctls[] = {
40494 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40495 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40496 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40497@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40498 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40499 {
40500 unsigned int nr = DRM_IOCTL_NR(cmd);
40501- drm_ioctl_compat_t *fn;
40502 int ret;
40503
40504 /* Assume that ioctls without an explicit compat routine will just
40505@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40506 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40507 return drm_ioctl(filp, cmd, arg);
40508
40509- fn = drm_compat_ioctls[nr];
40510-
40511- if (fn != NULL)
40512- ret = (*fn) (filp, cmd, arg);
40513+ if (drm_compat_ioctls[nr] != NULL)
40514+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40515 else
40516 ret = drm_ioctl(filp, cmd, arg);
40517
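Dropping the '*' from "drm_ioctl_compat_t *drm_compat_ioctls[]" evidently relies on drm_ioctl_compat_t being redefined elsewhere in this patch as a pointer-to-function typedef, since an array of plain function types would not compile; spelling the pointer inside the typedef lets the constify tooling treat the whole table as read-only function-pointer data. The drm_ioctl.c hunk just below uses the complementary trick, a drm_ioctl_no_const_t typedef for the one local that must hold a mutable copy. The two typedef styles side by side, with invented names:

/* Sketch of the typedef distinction; both tables are arrays of
 * function pointers, the '*' just lives in different places. */
typedef int hyp_fn_t(unsigned int cmd);        /* names a function type */
typedef int (*hyp_fn_ptr_t)(unsigned int cmd); /* names a pointer type  */

static hyp_fn_t     *table_a[8];   /* '*' required at the use site */
static hyp_fn_ptr_t  table_b[8];   /* '*' folded into the typedef  */
/* static hyp_fn_t  bad[8]; */     /* invalid C: array of functions */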
40518diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40519index 00587a1..57a65ca 100644
40520--- a/drivers/gpu/drm/drm_ioctl.c
40521+++ b/drivers/gpu/drm/drm_ioctl.c
40522@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40523 struct drm_file *file_priv = filp->private_data;
40524 struct drm_device *dev;
40525 const struct drm_ioctl_desc *ioctl = NULL;
40526- drm_ioctl_t *func;
40527+ drm_ioctl_no_const_t func;
40528 unsigned int nr = DRM_IOCTL_NR(cmd);
40529 int retcode = -EINVAL;
40530 char stack_kdata[128];
40531diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40532index 93ec5dc..82acbaf 100644
40533--- a/drivers/gpu/drm/i810/i810_drv.h
40534+++ b/drivers/gpu/drm/i810/i810_drv.h
40535@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40536 int page_flipping;
40537
40538 wait_queue_head_t irq_queue;
40539- atomic_t irq_received;
40540- atomic_t irq_emitted;
40541+ atomic_unchecked_t irq_received;
40542+ atomic_unchecked_t irq_emitted;
40543
40544 int front_offset;
40545 } drm_i810_private_t;
40546diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40547index ecee3bc..ad5ae67 100644
40548--- a/drivers/gpu/drm/i915/i915_dma.c
40549+++ b/drivers/gpu/drm/i915/i915_dma.c
40550@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40551 * locking inversion with the driver load path. And the access here is
40552 * completely racy anyway. So don't bother with locking for now.
40553 */
40554- return dev->open_count == 0;
40555+ return local_read(&dev->open_count) == 0;
40556 }
40557
40558 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40559diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40560index 1173831..7dfb389 100644
40561--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40562+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40563@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40564 static int
40565 validate_exec_list(struct drm_device *dev,
40566 struct drm_i915_gem_exec_object2 *exec,
40567- int count)
40568+ unsigned int count)
40569 {
40570 unsigned relocs_total = 0;
40571 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40572 unsigned invalid_flags;
40573- int i;
40574+ unsigned int i;
40575
40576 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40577 if (USES_FULL_PPGTT(dev))
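validate_exec_list takes the user-supplied buffer count; keeping it (and the loop index) signed invites trouble, since a negative count skips an "i < count" loop entirely while later size arithmetic reinterprets it as a huge unsigned value, whereas as unsigned int any hostile value is simply a large count that the existing relocs_total/relocs_max checks reject. The classic failure mode, sketched (struct hyp_obj and hyp_check are invented):

/* Sketch: why user-controlled counts should be unsigned. */
static int hyp_validate(const struct hyp_obj *objs, int count)
{
        int i;

        /* count == -1 skips the loop, so "validation" trivially
         * succeeds ... */
        for (i = 0; i < count; i++)
                if (hyp_check(&objs[i]))
                        return -EINVAL;
        /* ... while a later consumer converting count to size_t
         * for a copy length sees an enormous value. */
        return 0;
}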
40578diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40579index 176de63..1ef9ac7 100644
40580--- a/drivers/gpu/drm/i915/i915_ioc32.c
40581+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40582@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40583 (unsigned long)request);
40584 }
40585
40586-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40587+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40588 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40589 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40590 [DRM_I915_GETPARAM] = compat_i915_getparam,
40591@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40592 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40593 {
40594 unsigned int nr = DRM_IOCTL_NR(cmd);
40595- drm_ioctl_compat_t *fn = NULL;
40596 int ret;
40597
40598 if (nr < DRM_COMMAND_BASE)
40599 return drm_compat_ioctl(filp, cmd, arg);
40600
40601- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40602- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40603-
40604- if (fn != NULL)
40605+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40606+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40607 ret = (*fn) (filp, cmd, arg);
40608- else
40609+ } else
40610 ret = drm_ioctl(filp, cmd, arg);
40611
40612 return ret;
40613diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40614index c10b52e..e5e27ff 100644
40615--- a/drivers/gpu/drm/i915/intel_display.c
40616+++ b/drivers/gpu/drm/i915/intel_display.c
40617@@ -12935,13 +12935,13 @@ struct intel_quirk {
40618 int subsystem_vendor;
40619 int subsystem_device;
40620 void (*hook)(struct drm_device *dev);
40621-};
40622+} __do_const;
40623
40624 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40625 struct intel_dmi_quirk {
40626 void (*hook)(struct drm_device *dev);
40627 const struct dmi_system_id (*dmi_id_list)[];
40628-};
40629+} __do_const;
40630
40631 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40632 {
40633@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40634 return 1;
40635 }
40636
40637-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40638+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40639 {
40640- .dmi_id_list = &(const struct dmi_system_id[]) {
40641- {
40642- .callback = intel_dmi_reverse_brightness,
40643- .ident = "NCR Corporation",
40644- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40645- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40646- },
40647- },
40648- { } /* terminating entry */
40649+ .callback = intel_dmi_reverse_brightness,
40650+ .ident = "NCR Corporation",
40651+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40652+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40653 },
40654+ },
40655+ { } /* terminating entry */
40656+};
40657+
40658+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40659+ {
40660+ .dmi_id_list = &intel_dmi_quirks_table,
40661 .hook = quirk_invert_brightness,
40662 },
40663 };
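The intel_display.c rework changes layout, not behavior: the DMI match list moves out of an anonymous compound literal into a named static table (intel_dmi_quirks_table), presumably because the constify machinery wants a named, statically allocated object once intel_quirk and intel_dmi_quirk carry __do_const. A reduced sketch of the hoist, with hypothetical names:

/* Sketch: a named static table can be constified, an inline
 * compound literal cannot. */
static void hyp_hook(struct drm_device *dev) { }

static const struct dmi_system_id hyp_dmi_table[] = {
        { .ident = "Example Vendor" },
        { }                              /* terminating entry */
};

static const struct hyp_quirk {
        const struct dmi_system_id (*dmi_id_list)[];
        void (*hook)(struct drm_device *dev);
} hyp_quirks[] = {
        { .dmi_id_list = &hyp_dmi_table, .hook = hyp_hook },
};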
40664diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40665index b250130..98df2a4 100644
40666--- a/drivers/gpu/drm/imx/imx-drm-core.c
40667+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40668@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40669 if (imxdrm->pipes >= MAX_CRTC)
40670 return -EINVAL;
40671
40672- if (imxdrm->drm->open_count)
40673+ if (local_read(&imxdrm->drm->open_count))
40674 return -EBUSY;
40675
40676 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40677diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40678index b4a2014..219ab78 100644
40679--- a/drivers/gpu/drm/mga/mga_drv.h
40680+++ b/drivers/gpu/drm/mga/mga_drv.h
40681@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40682 u32 clear_cmd;
40683 u32 maccess;
40684
40685- atomic_t vbl_received; /**< Number of vblanks received. */
40686+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40687 wait_queue_head_t fence_queue;
40688- atomic_t last_fence_retired;
40689+ atomic_unchecked_t last_fence_retired;
40690 u32 next_fence_to_post;
40691
40692 unsigned int fb_cpp;
40693diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40694index 729bfd5..ead8823 100644
40695--- a/drivers/gpu/drm/mga/mga_ioc32.c
40696+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40697@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40698 return 0;
40699 }
40700
40701-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40702+drm_ioctl_compat_t mga_compat_ioctls[] = {
40703 [DRM_MGA_INIT] = compat_mga_init,
40704 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40705 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40706@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40707 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40708 {
40709 unsigned int nr = DRM_IOCTL_NR(cmd);
40710- drm_ioctl_compat_t *fn = NULL;
40711 int ret;
40712
40713 if (nr < DRM_COMMAND_BASE)
40714 return drm_compat_ioctl(filp, cmd, arg);
40715
40716- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40717- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40718-
40719- if (fn != NULL)
40720+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40721+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40722 ret = (*fn) (filp, cmd, arg);
40723- else
40724+ } else
40725 ret = drm_ioctl(filp, cmd, arg);
40726
40727 return ret;
40728diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40729index 1b071b8..de8601a 100644
40730--- a/drivers/gpu/drm/mga/mga_irq.c
40731+++ b/drivers/gpu/drm/mga/mga_irq.c
40732@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40733 if (crtc != 0)
40734 return 0;
40735
40736- return atomic_read(&dev_priv->vbl_received);
40737+ return atomic_read_unchecked(&dev_priv->vbl_received);
40738 }
40739
40740
40741@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40742 /* VBLANK interrupt */
40743 if (status & MGA_VLINEPEN) {
40744 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40745- atomic_inc(&dev_priv->vbl_received);
40746+ atomic_inc_unchecked(&dev_priv->vbl_received);
40747 drm_handle_vblank(dev, 0);
40748 handled = 1;
40749 }
40750@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40751 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40752 MGA_WRITE(MGA_PRIMEND, prim_end);
40753
40754- atomic_inc(&dev_priv->last_fence_retired);
40755+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40756 wake_up(&dev_priv->fence_queue);
40757 handled = 1;
40758 }
40759@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40760 * using fences.
40761 */
40762 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40763- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40764+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40765 - *sequence) <= (1 << 23)));
40766
40767 *sequence = cur_fence;
40768diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40769index 7df6acc..84bbe52 100644
40770--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40771+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40772@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40773 struct bit_table {
40774 const char id;
40775 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40776-};
40777+} __no_const;
40778
40779 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40780
40781diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40782index 8ae36f2..1147a30 100644
40783--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40784+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40785@@ -121,7 +121,6 @@ struct nouveau_drm {
40786 struct drm_global_reference mem_global_ref;
40787 struct ttm_bo_global_ref bo_global_ref;
40788 struct ttm_bo_device bdev;
40789- atomic_t validate_sequence;
40790 int (*move)(struct nouveau_channel *,
40791 struct ttm_buffer_object *,
40792 struct ttm_mem_reg *, struct ttm_mem_reg *);
40793diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40794index 462679a..88e32a7 100644
40795--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40796+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40797@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40798 unsigned long arg)
40799 {
40800 unsigned int nr = DRM_IOCTL_NR(cmd);
40801- drm_ioctl_compat_t *fn = NULL;
40802+ drm_ioctl_compat_t fn = NULL;
40803 int ret;
40804
40805 if (nr < DRM_COMMAND_BASE)
40806diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40807index 3d1cfcb..0542700 100644
40808--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40809+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40810@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40811 }
40812
40813 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40814- nouveau_vram_manager_init,
40815- nouveau_vram_manager_fini,
40816- nouveau_vram_manager_new,
40817- nouveau_vram_manager_del,
40818- nouveau_vram_manager_debug
40819+ .init = nouveau_vram_manager_init,
40820+ .takedown = nouveau_vram_manager_fini,
40821+ .get_node = nouveau_vram_manager_new,
40822+ .put_node = nouveau_vram_manager_del,
40823+ .debug = nouveau_vram_manager_debug
40824 };
40825
40826 static int
40827@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40828 }
40829
40830 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40831- nouveau_gart_manager_init,
40832- nouveau_gart_manager_fini,
40833- nouveau_gart_manager_new,
40834- nouveau_gart_manager_del,
40835- nouveau_gart_manager_debug
40836+ .init = nouveau_gart_manager_init,
40837+ .takedown = nouveau_gart_manager_fini,
40838+ .get_node = nouveau_gart_manager_new,
40839+ .put_node = nouveau_gart_manager_del,
40840+ .debug = nouveau_gart_manager_debug
40841 };
40842
40843 /*XXX*/
40844@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40845 }
40846
40847 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40848- nv04_gart_manager_init,
40849- nv04_gart_manager_fini,
40850- nv04_gart_manager_new,
40851- nv04_gart_manager_del,
40852- nv04_gart_manager_debug
40853+ .init = nv04_gart_manager_init,
40854+ .takedown = nv04_gart_manager_fini,
40855+ .get_node = nv04_gart_manager_new,
40856+ .put_node = nv04_gart_manager_del,
40857+ .debug = nv04_gart_manager_debug
40858 };
40859
40860 int
40861diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40862index c7592ec..dd45ebc 100644
40863--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40864+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40865@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40866 * locking inversion with the driver load path. And the access here is
40867 * completely racy anyway. So don't bother with locking for now.
40868 */
40869- return dev->open_count == 0;
40870+ return local_read(&dev->open_count) == 0;
40871 }
40872
40873 static const struct vga_switcheroo_client_ops
40874diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40875index 9782364..89bd954 100644
40876--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40877+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40878@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40879 int ret;
40880
40881 mutex_lock(&qdev->async_io_mutex);
40882- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40883+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40884 if (qdev->last_sent_io_cmd > irq_num) {
40885 if (intr)
40886 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40887- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40888+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40889 else
40890 ret = wait_event_timeout(qdev->io_cmd_event,
40891- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40892+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40893 /* 0 is timeout, just bail the "hw" has gone away */
40894 if (ret <= 0)
40895 goto out;
40896- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40897+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40898 }
40899 outb(val, addr);
40900 qdev->last_sent_io_cmd = irq_num + 1;
40901 if (intr)
40902 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40903- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40904+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40905 else
40906 ret = wait_event_timeout(qdev->io_cmd_event,
40907- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40908+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40909 out:
40910 if (ret > 0)
40911 ret = 0;
40912diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40913index 6911b8c..89d6867 100644
40914--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40915+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40916@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40917 struct drm_info_node *node = (struct drm_info_node *) m->private;
40918 struct qxl_device *qdev = node->minor->dev->dev_private;
40919
40920- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40921- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40922- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40923- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40924+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40925+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40926+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40927+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40928 seq_printf(m, "%d\n", qdev->irq_received_error);
40929 return 0;
40930 }
40931diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40932index 7c6cafe..460f542 100644
40933--- a/drivers/gpu/drm/qxl/qxl_drv.h
40934+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40935@@ -290,10 +290,10 @@ struct qxl_device {
40936 unsigned int last_sent_io_cmd;
40937
40938 /* interrupt handling */
40939- atomic_t irq_received;
40940- atomic_t irq_received_display;
40941- atomic_t irq_received_cursor;
40942- atomic_t irq_received_io_cmd;
40943+ atomic_unchecked_t irq_received;
40944+ atomic_unchecked_t irq_received_display;
40945+ atomic_unchecked_t irq_received_cursor;
40946+ atomic_unchecked_t irq_received_io_cmd;
40947 unsigned irq_received_error;
40948 wait_queue_head_t display_event;
40949 wait_queue_head_t cursor_event;
40950diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40951index b110883..dd06418 100644
40952--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40953+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40954@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40955
40956 /* TODO copy slow path code from i915 */
40957 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40958- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40959+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40960
40961 {
40962 struct qxl_drawable *draw = fb_cmd;
40963@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40964 struct drm_qxl_reloc reloc;
40965
40966 if (copy_from_user(&reloc,
40967- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40968+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40969 sizeof(reloc))) {
40970 ret = -EFAULT;
40971 goto out_free_bos;
40972@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40973
40974 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
40975
40976- struct drm_qxl_command *commands =
40977- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40978+ struct drm_qxl_command __user *commands =
40979+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
40980
40981- if (copy_from_user(&user_cmd, &commands[cmd_num],
40982+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40983 sizeof(user_cmd)))
40984 return -EFAULT;
40985
40986diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40987index 0bf1e20..42a7310 100644
40988--- a/drivers/gpu/drm/qxl/qxl_irq.c
40989+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40990@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
40991 if (!pending)
40992 return IRQ_NONE;
40993
40994- atomic_inc(&qdev->irq_received);
40995+ atomic_inc_unchecked(&qdev->irq_received);
40996
40997 if (pending & QXL_INTERRUPT_DISPLAY) {
40998- atomic_inc(&qdev->irq_received_display);
40999+ atomic_inc_unchecked(&qdev->irq_received_display);
41000 wake_up_all(&qdev->display_event);
41001 qxl_queue_garbage_collect(qdev, false);
41002 }
41003 if (pending & QXL_INTERRUPT_CURSOR) {
41004- atomic_inc(&qdev->irq_received_cursor);
41005+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41006 wake_up_all(&qdev->cursor_event);
41007 }
41008 if (pending & QXL_INTERRUPT_IO_CMD) {
41009- atomic_inc(&qdev->irq_received_io_cmd);
41010+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41011 wake_up_all(&qdev->io_cmd_event);
41012 }
41013 if (pending & QXL_INTERRUPT_ERROR) {
41014@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41015 init_waitqueue_head(&qdev->io_cmd_event);
41016 INIT_WORK(&qdev->client_monitors_config_work,
41017 qxl_client_monitors_config_work_func);
41018- atomic_set(&qdev->irq_received, 0);
41019- atomic_set(&qdev->irq_received_display, 0);
41020- atomic_set(&qdev->irq_received_cursor, 0);
41021- atomic_set(&qdev->irq_received_io_cmd, 0);
41022+ atomic_set_unchecked(&qdev->irq_received, 0);
41023+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41024+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41025+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41026 qdev->irq_received_error = 0;
41027 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41028 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41029diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41030index 0cbc4c9..0e46686 100644
41031--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41032+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41033@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41034 }
41035 }
41036
41037-static struct vm_operations_struct qxl_ttm_vm_ops;
41038+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41039 static const struct vm_operations_struct *ttm_vm_ops;
41040
41041 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41042@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41043 return r;
41044 if (unlikely(ttm_vm_ops == NULL)) {
41045 ttm_vm_ops = vma->vm_ops;
41046+ pax_open_kernel();
41047 qxl_ttm_vm_ops = *ttm_vm_ops;
41048 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41049+ pax_close_kernel();
41050 }
41051 vma->vm_ops = &qxl_ttm_vm_ops;
41052 return 0;
41053@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41054 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41055 {
41056 #if defined(CONFIG_DEBUG_FS)
41057- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41058- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41059- unsigned i;
41060+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41061+ {
41062+ .name = "qxl_mem_mm",
41063+ .show = &qxl_mm_dump_table,
41064+ },
41065+ {
41066+ .name = "qxl_surf_mm",
41067+ .show = &qxl_mm_dump_table,
41068+ }
41069+ };
41070
41071- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41072- if (i == 0)
41073- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41074- else
41075- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41076- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41077- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41078- qxl_mem_types_list[i].driver_features = 0;
41079- if (i == 0)
41080- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41081- else
41082- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41083+ pax_open_kernel();
41084+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41085+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41086+ pax_close_kernel();
41087
41088- }
41089- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41090+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41091 #else
41092 return 0;
41093 #endif
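qxl_ttm.c shows two more instances of the same theme: the driver clones TTM's vm_operations_struct once at first mmap and overrides .fault, so the now read-only template must be patched inside a pax_open_kernel() window, and the debugfs table, previously filled in at runtime with sprintf(), becomes a static designated-initializer array whose only runtime writes are the two .data pointers. The clone-and-override idiom, reduced to a sketch (radeon_ttm.c below repeats it verbatim; vm_operations_struct_no_const, __read_only and the pax_*_kernel() calls are PaX, hyp_* names are invented):

/* Sketch of overriding one hook in a read-only ops template. */
static int hyp_fault(struct vm_area_struct *vma, struct vm_fault *vmf); /* hypothetical */

static vm_operations_struct_no_const hyp_vm_ops __read_only;
static const struct vm_operations_struct *orig_vm_ops;

static int hyp_mmap_fixup(struct vm_area_struct *vma)
{
        if (unlikely(orig_vm_ops == NULL)) {
                orig_vm_ops = vma->vm_ops;
                pax_open_kernel();
                hyp_vm_ops = *orig_vm_ops;          /* copy the template */
                hyp_vm_ops.fault = &hyp_fault;      /* override one hook */
                pax_close_kernel();
        }
        vma->vm_ops = &hyp_vm_ops;
        return 0;
}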
41094diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41095index 2c45ac9..5d740f8 100644
41096--- a/drivers/gpu/drm/r128/r128_cce.c
41097+++ b/drivers/gpu/drm/r128/r128_cce.c
41098@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41099
41100 /* GH: Simple idle check.
41101 */
41102- atomic_set(&dev_priv->idle_count, 0);
41103+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41104
41105 /* We don't support anything other than bus-mastering ring mode,
41106 * but the ring can be in either AGP or PCI space for the ring
41107diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41108index 723e5d6..102dbaf 100644
41109--- a/drivers/gpu/drm/r128/r128_drv.h
41110+++ b/drivers/gpu/drm/r128/r128_drv.h
41111@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41112 int is_pci;
41113 unsigned long cce_buffers_offset;
41114
41115- atomic_t idle_count;
41116+ atomic_unchecked_t idle_count;
41117
41118 int page_flipping;
41119 int current_page;
41120 u32 crtc_offset;
41121 u32 crtc_offset_cntl;
41122
41123- atomic_t vbl_received;
41124+ atomic_unchecked_t vbl_received;
41125
41126 u32 color_fmt;
41127 unsigned int front_offset;
41128diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41129index 663f38c..c689495 100644
41130--- a/drivers/gpu/drm/r128/r128_ioc32.c
41131+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41132@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41133 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41134 }
41135
41136-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41137+drm_ioctl_compat_t r128_compat_ioctls[] = {
41138 [DRM_R128_INIT] = compat_r128_init,
41139 [DRM_R128_DEPTH] = compat_r128_depth,
41140 [DRM_R128_STIPPLE] = compat_r128_stipple,
41141@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41142 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41143 {
41144 unsigned int nr = DRM_IOCTL_NR(cmd);
41145- drm_ioctl_compat_t *fn = NULL;
41146 int ret;
41147
41148 if (nr < DRM_COMMAND_BASE)
41149 return drm_compat_ioctl(filp, cmd, arg);
41150
41151- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41152- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41153-
41154- if (fn != NULL)
41155+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41156+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41157 ret = (*fn) (filp, cmd, arg);
41158- else
41159+ } else
41160 ret = drm_ioctl(filp, cmd, arg);
41161
41162 return ret;
41163diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41164index c2ae496..30b5993 100644
41165--- a/drivers/gpu/drm/r128/r128_irq.c
41166+++ b/drivers/gpu/drm/r128/r128_irq.c
41167@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41168 if (crtc != 0)
41169 return 0;
41170
41171- return atomic_read(&dev_priv->vbl_received);
41172+ return atomic_read_unchecked(&dev_priv->vbl_received);
41173 }
41174
41175 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41176@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41177 /* VBLANK interrupt */
41178 if (status & R128_CRTC_VBLANK_INT) {
41179 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41180- atomic_inc(&dev_priv->vbl_received);
41181+ atomic_inc_unchecked(&dev_priv->vbl_received);
41182 drm_handle_vblank(dev, 0);
41183 return IRQ_HANDLED;
41184 }
41185diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41186index 8fd2d9f..18c9660 100644
41187--- a/drivers/gpu/drm/r128/r128_state.c
41188+++ b/drivers/gpu/drm/r128/r128_state.c
41189@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41190
41191 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41192 {
41193- if (atomic_read(&dev_priv->idle_count) == 0)
41194+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41195 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41196 else
41197- atomic_set(&dev_priv->idle_count, 0);
41198+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41199 }
41200
41201 #endif
41202diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41203index b928c17..e5d9400 100644
41204--- a/drivers/gpu/drm/radeon/mkregtable.c
41205+++ b/drivers/gpu/drm/radeon/mkregtable.c
41206@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41207 regex_t mask_rex;
41208 regmatch_t match[4];
41209 char buf[1024];
41210- size_t end;
41211+ long end;
41212 int len;
41213 int done = 0;
41214 int r;
41215 unsigned o;
41216 struct offset *offset;
41217 char last_reg_s[10];
41218- int last_reg;
41219+ unsigned long last_reg;
41220
41221 if (regcomp
41222 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41223diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41224index bd7519f..e1c2cd95 100644
41225--- a/drivers/gpu/drm/radeon/radeon_device.c
41226+++ b/drivers/gpu/drm/radeon/radeon_device.c
41227@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41228 * locking inversion with the driver load path. And the access here is
41229 * completely racy anyway. So don't bother with locking for now.
41230 */
41231- return dev->open_count == 0;
41232+ return local_read(&dev->open_count) == 0;
41233 }
41234
41235 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41236diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41237index 46bd393..6ae4719 100644
41238--- a/drivers/gpu/drm/radeon/radeon_drv.h
41239+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41240@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41241
41242 /* SW interrupt */
41243 wait_queue_head_t swi_queue;
41244- atomic_t swi_emitted;
41245+ atomic_unchecked_t swi_emitted;
41246 int vblank_crtc;
41247 uint32_t irq_enable_reg;
41248 uint32_t r500_disp_irq_reg;
41249diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41250index 0b98ea1..0881827 100644
41251--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41252+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41253@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41254 request = compat_alloc_user_space(sizeof(*request));
41255 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41256 || __put_user(req32.param, &request->param)
41257- || __put_user((void __user *)(unsigned long)req32.value,
41258+ || __put_user((unsigned long)req32.value,
41259 &request->value))
41260 return -EFAULT;
41261
41262@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41263 #define compat_radeon_cp_setparam NULL
41264 #endif /* X86_64 || IA64 */
41265
41266-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41267+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41268 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41269 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41270 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41271@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41272 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41273 {
41274 unsigned int nr = DRM_IOCTL_NR(cmd);
41275- drm_ioctl_compat_t *fn = NULL;
41276 int ret;
41277
41278 if (nr < DRM_COMMAND_BASE)
41279 return drm_compat_ioctl(filp, cmd, arg);
41280
41281- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41282- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41283-
41284- if (fn != NULL)
41285+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41286+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41287 ret = (*fn) (filp, cmd, arg);
41288- else
41289+ } else
41290 ret = drm_ioctl(filp, cmd, arg);
41291
41292 return ret;
41293diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41294index 244b19b..c19226d 100644
41295--- a/drivers/gpu/drm/radeon/radeon_irq.c
41296+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41297@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41298 unsigned int ret;
41299 RING_LOCALS;
41300
41301- atomic_inc(&dev_priv->swi_emitted);
41302- ret = atomic_read(&dev_priv->swi_emitted);
41303+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41304+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41305
41306 BEGIN_RING(4);
41307 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41308@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41309 drm_radeon_private_t *dev_priv =
41310 (drm_radeon_private_t *) dev->dev_private;
41311
41312- atomic_set(&dev_priv->swi_emitted, 0);
41313+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41314 init_waitqueue_head(&dev_priv->swi_queue);
41315
41316 dev->max_vblank_count = 0x001fffff;
41317diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41318index 15aee72..cda326e 100644
41319--- a/drivers/gpu/drm/radeon/radeon_state.c
41320+++ b/drivers/gpu/drm/radeon/radeon_state.c
41321@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41322 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41323 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41324
41325- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41326+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41327 sarea_priv->nbox * sizeof(depth_boxes[0])))
41328 return -EFAULT;
41329
41330@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41331 {
41332 drm_radeon_private_t *dev_priv = dev->dev_private;
41333 drm_radeon_getparam_t *param = data;
41334- int value;
41335+ int value = 0;
41336
41337 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41338
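The radeon_cp_clear change is a use-after-clamp fix: nbox lives in the SAREA, which is mapped into userspace, so the value clamped two lines earlier can be rewritten by another thread before it sizes the copy_from_user(); re-validating at the point of use narrows the window, and copying into a local first closes it entirely. The getparam hunk zero-initializes "int value" for a related reason, so an unhandled parameter cannot leak uninitialized stack contents to userspace. A sketch of the robust form, with invented names:

/* Sketch: snapshot a count that lives in userspace-shared memory
 * before using it for both the check and the copy size. */
#define HYP_NR_CLIPRECTS 12
struct hyp_box   { int x1, y1, x2, y2; };
struct hyp_sarea { unsigned int nbox; };          /* mapped into userspace */

static int hyp_clear(struct hyp_sarea *sarea,
                     const struct hyp_box __user *uboxes,
                     struct hyp_box *boxes)
{
        unsigned int nbox = READ_ONCE(sarea->nbox);   /* read exactly once */

        if (nbox > HYP_NR_CLIPRECTS)
                return -EINVAL;
        if (copy_from_user(boxes, uboxes, nbox * sizeof(*boxes)))
                return -EFAULT;
        return 0;
}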
41339diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41340index d02aa1d..ca19e2c 100644
41341--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41342+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41343@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41344 man->size = size >> PAGE_SHIFT;
41345 }
41346
41347-static struct vm_operations_struct radeon_ttm_vm_ops;
41348+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41349 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41350
41351 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41352@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41353 }
41354 if (unlikely(ttm_vm_ops == NULL)) {
41355 ttm_vm_ops = vma->vm_ops;
41356+ pax_open_kernel();
41357 radeon_ttm_vm_ops = *ttm_vm_ops;
41358 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41359+ pax_close_kernel();
41360 }
41361 vma->vm_ops = &radeon_ttm_vm_ops;
41362 return 0;
41363diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41364index 978993f..e36e50e 100644
41365--- a/drivers/gpu/drm/tegra/dc.c
41366+++ b/drivers/gpu/drm/tegra/dc.c
41367@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41368 }
41369
41370 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41371- dc->debugfs_files[i].data = dc;
41372+ *(void **)&dc->debugfs_files[i].data = dc;
41373
41374 err = drm_debugfs_create_files(dc->debugfs_files,
41375 ARRAY_SIZE(debugfs_files),
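The tegra/dc.c hunk writes the debugfs private pointer through a void ** cast because struct drm_info_list is constified elsewhere in the patch; the object being written is a writable runtime copy, only its declared type says const. A sketch of that shape, with a hypothetical info_demo structure standing in for drm_info_list:

#include <stdlib.h>
#include <string.h>

struct info_demo {
        const char *name;
        void *const data;       /* as if constified by the plugin */
};

static const struct info_demo template[2] = {
        { "regs", NULL }, { "clocks", NULL },
};

static struct info_demo *dup_files_with_priv(void *priv)
{
        struct info_demo *copy = malloc(sizeof(template));
        size_t i;

        if (!copy)
                return NULL;
        memcpy(copy, template, sizeof(template));
        for (i = 0; i < 2; i++)
                *(void **)&copy[i].data = priv; /* write past the qualifier */
        return copy;
}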
41376diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41377index 33f67fd..55ee9761 100644
41378--- a/drivers/gpu/drm/tegra/dsi.c
41379+++ b/drivers/gpu/drm/tegra/dsi.c
41380@@ -39,7 +39,7 @@ struct tegra_dsi {
41381 struct clk *clk_lp;
41382 struct clk *clk;
41383
41384- struct drm_info_list *debugfs_files;
41385+ drm_info_list_no_const *debugfs_files;
41386 struct drm_minor *minor;
41387 struct dentry *debugfs;
41388
41389diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41390index ffe2654..03c7b1c 100644
41391--- a/drivers/gpu/drm/tegra/hdmi.c
41392+++ b/drivers/gpu/drm/tegra/hdmi.c
41393@@ -60,7 +60,7 @@ struct tegra_hdmi {
41394 bool stereo;
41395 bool dvi;
41396
41397- struct drm_info_list *debugfs_files;
41398+ drm_info_list_no_const *debugfs_files;
41399 struct drm_minor *minor;
41400 struct dentry *debugfs;
41401 };
41402diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41403index aa0bd054..aea6a01 100644
41404--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41405+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41406@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41407 }
41408
41409 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41410- ttm_bo_man_init,
41411- ttm_bo_man_takedown,
41412- ttm_bo_man_get_node,
41413- ttm_bo_man_put_node,
41414- ttm_bo_man_debug
41415+ .init = ttm_bo_man_init,
41416+ .takedown = ttm_bo_man_takedown,
41417+ .get_node = ttm_bo_man_get_node,
41418+ .put_node = ttm_bo_man_put_node,
41419+ .debug = ttm_bo_man_debug
41420 };
41421 EXPORT_SYMBOL(ttm_bo_manager_func);
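Converting ttm_bo_manager_func (and vmw_gmrid_manager_func further down) from positional to designated initializers makes the ops table robust against member reordering and makes each hook-to-slot binding explicit. The same style in isolation:

/* Designated initializers, as the ttm hunk adopts. */
struct mem_func_demo {
        int  (*init)(void);
        void (*takedown)(void);
        void (*debug)(const char *msg);
};

static int  demo_init(void)             { return 0; }
static void demo_takedown(void)         { }
static void demo_debug(const char *msg) { (void)msg; }

static const struct mem_func_demo demo_func = {
        .init     = demo_init,
        .takedown = demo_takedown,
        .debug    = demo_debug,
};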
41422diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41423index a1803fb..c53f6b0 100644
41424--- a/drivers/gpu/drm/ttm/ttm_memory.c
41425+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41426@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41427 zone->glob = glob;
41428 glob->zone_kernel = zone;
41429 ret = kobject_init_and_add(
41430- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41431+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41432 if (unlikely(ret != 0)) {
41433 kobject_put(&zone->kobj);
41434 return ret;
41435@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41436 zone->glob = glob;
41437 glob->zone_dma32 = zone;
41438 ret = kobject_init_and_add(
41439- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41440+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41441 if (unlikely(ret != 0)) {
41442 kobject_put(&zone->kobj);
41443 return ret;
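Both ttm_memory.c hunks are the classic format-string fix: zone->name is data, not a format, so it is routed through an explicit "%s" before reaching kobject_init_and_add()'s printf-style argument. The rule in miniature:

#include <stdio.h>

void log_zone(const char *zone_name)
{
        /* printf(zone_name);  BAD: any '%' in the name is interpreted */
        printf("%s", zone_name);        /* GOOD: the name is plain data */
}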
41444diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41445index 025c429..314062f 100644
41446--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41447+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41448@@ -54,7 +54,7 @@
41449
41450 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41451 #define SMALL_ALLOCATION 16
41452-#define FREE_ALL_PAGES (~0U)
41453+#define FREE_ALL_PAGES (~0UL)
41454 /* times are in msecs */
41455 #define PAGE_FREE_INTERVAL 1000
41456
41457@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41458 * @free_all: If set to true will free all pages in pool
41459 * @use_static: Safe to use static buffer
41460 **/
41461-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41462+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41463 bool use_static)
41464 {
41465 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41466 unsigned long irq_flags;
41467 struct page *p;
41468 struct page **pages_to_free;
41469- unsigned freed_pages = 0,
41470- npages_to_free = nr_free;
41471+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41472
41473 if (NUM_PAGES_TO_ALLOC < nr_free)
41474 npages_to_free = NUM_PAGES_TO_ALLOC;
41475@@ -371,7 +370,8 @@ restart:
41476 __list_del(&p->lru, &pool->list);
41477
41478 ttm_pool_update_free_locked(pool, freed_pages);
41479- nr_free -= freed_pages;
41480+ if (likely(nr_free != FREE_ALL_PAGES))
41481+ nr_free -= freed_pages;
41482 }
41483
41484 spin_unlock_irqrestore(&pool->lock, irq_flags);
41485@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41486 unsigned i;
41487 unsigned pool_offset;
41488 struct ttm_page_pool *pool;
41489- int shrink_pages = sc->nr_to_scan;
41490+ unsigned long shrink_pages = sc->nr_to_scan;
41491 unsigned long freed = 0;
41492
41493 if (!mutex_trylock(&lock))
41494@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41495 pool_offset = ++start_pool % NUM_POOLS;
41496 /* select start pool in round robin fashion */
41497 for (i = 0; i < NUM_POOLS; ++i) {
41498- unsigned nr_free = shrink_pages;
41499+ unsigned long nr_free = shrink_pages;
41500 if (shrink_pages == 0)
41501 break;
41502 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41503@@ -673,7 +673,7 @@ out:
41504 }
41505
41506 /* Put all pages in pages list to correct pool to wait for reuse */
41507-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41508+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41509 enum ttm_caching_state cstate)
41510 {
41511 unsigned long irq_flags;
41512@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41513 struct list_head plist;
41514 struct page *p = NULL;
41515 gfp_t gfp_flags = GFP_USER;
41516- unsigned count;
41517+ unsigned long count;
41518 int r;
41519
41520 /* set zero flag for page allocation if required */
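The ttm_page_alloc.c changes widen the page counts to unsigned long so FREE_ALL_PAGES (~0UL) fits, and stop subtracting freed pages from nr_free while it holds the sentinel: decrementing ~0UL would silently turn "free everything" into a large-but-finite count. A compact model of the corrected loop, assuming an arbitrary batch size of 16:

#include <stdio.h>

#define FREE_ALL_PAGES (~0UL)

static unsigned long pool_free(unsigned long nr_free, unsigned long in_pool)
{
        while (nr_free && in_pool) {
                unsigned long batch = (in_pool < 16) ? in_pool : 16;

                if (batch > nr_free && nr_free != FREE_ALL_PAGES)
                        batch = nr_free;
                in_pool -= batch;
                if (nr_free != FREE_ALL_PAGES)  /* keep the sentinel intact */
                        nr_free -= batch;
        }
        return in_pool;                         /* pages left unfreed */
}

int main(void)
{
        printf("%lu\n", pool_free(FREE_ALL_PAGES, 1000));       /* 0 */
        printf("%lu\n", pool_free(100, 1000));                  /* 900 */
        return 0;
}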
41521diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41522index 01e1d27..aaa018a 100644
41523--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41524+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41525@@ -56,7 +56,7 @@
41526
41527 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41528 #define SMALL_ALLOCATION 4
41529-#define FREE_ALL_PAGES (~0U)
41530+#define FREE_ALL_PAGES (~0UL)
41531 /* times are in msecs */
41532 #define IS_UNDEFINED (0)
41533 #define IS_WC (1<<1)
41534@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41535 * @nr_free: If set to true will free all pages in pool
41536 * @use_static: Safe to use static buffer
41537 **/
41538-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41539+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41540 bool use_static)
41541 {
41542 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41543@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41544 struct dma_page *dma_p, *tmp;
41545 struct page **pages_to_free;
41546 struct list_head d_pages;
41547- unsigned freed_pages = 0,
41548- npages_to_free = nr_free;
41549+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41550
41551 if (NUM_PAGES_TO_ALLOC < nr_free)
41552 npages_to_free = NUM_PAGES_TO_ALLOC;
41553@@ -499,7 +498,8 @@ restart:
41554 /* remove range of pages from the pool */
41555 if (freed_pages) {
41556 ttm_pool_update_free_locked(pool, freed_pages);
41557- nr_free -= freed_pages;
41558+ if (likely(nr_free != FREE_ALL_PAGES))
41559+ nr_free -= freed_pages;
41560 }
41561
41562 spin_unlock_irqrestore(&pool->lock, irq_flags);
41563@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41564 struct dma_page *d_page, *next;
41565 enum pool_type type;
41566 bool is_cached = false;
41567- unsigned count = 0, i, npages = 0;
41568+ unsigned long count = 0, i, npages = 0;
41569 unsigned long irq_flags;
41570
41571 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41572@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41573 static unsigned start_pool;
41574 unsigned idx = 0;
41575 unsigned pool_offset;
41576- unsigned shrink_pages = sc->nr_to_scan;
41577+ unsigned long shrink_pages = sc->nr_to_scan;
41578 struct device_pools *p;
41579 unsigned long freed = 0;
41580
41581@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41582 goto out;
41583 pool_offset = ++start_pool % _manager->npools;
41584 list_for_each_entry(p, &_manager->pools, pools) {
41585- unsigned nr_free;
41586+ unsigned long nr_free;
41587
41588 if (!p->dev)
41589 continue;
41590@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41591 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41592 freed += nr_free - shrink_pages;
41593
41594- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41595+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41596 p->pool->dev_name, p->pool->name, current->pid,
41597 nr_free, shrink_pages);
41598 }
41599diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41600index 8cbcb45..a4d9cf7 100644
41601--- a/drivers/gpu/drm/udl/udl_fb.c
41602+++ b/drivers/gpu/drm/udl/udl_fb.c
41603@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41604 fb_deferred_io_cleanup(info);
41605 kfree(info->fbdefio);
41606 info->fbdefio = NULL;
41607- info->fbops->fb_mmap = udl_fb_mmap;
41608 }
41609
41610 pr_warn("released /dev/fb%d user=%d count=%d\n",
41611diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41612index ef8c500..01030c8 100644
41613--- a/drivers/gpu/drm/via/via_drv.h
41614+++ b/drivers/gpu/drm/via/via_drv.h
41615@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41616 typedef uint32_t maskarray_t[5];
41617
41618 typedef struct drm_via_irq {
41619- atomic_t irq_received;
41620+ atomic_unchecked_t irq_received;
41621 uint32_t pending_mask;
41622 uint32_t enable_mask;
41623 wait_queue_head_t irq_queue;
41624@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41625 struct timeval last_vblank;
41626 int last_vblank_valid;
41627 unsigned usec_per_vblank;
41628- atomic_t vbl_received;
41629+ atomic_unchecked_t vbl_received;
41630 drm_via_state_t hc_state;
41631 char pci_buf[VIA_PCI_BUF_SIZE];
41632 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41633diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41634index 1319433..a993b0c 100644
41635--- a/drivers/gpu/drm/via/via_irq.c
41636+++ b/drivers/gpu/drm/via/via_irq.c
41637@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41638 if (crtc != 0)
41639 return 0;
41640
41641- return atomic_read(&dev_priv->vbl_received);
41642+ return atomic_read_unchecked(&dev_priv->vbl_received);
41643 }
41644
41645 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41646@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41647
41648 status = VIA_READ(VIA_REG_INTERRUPT);
41649 if (status & VIA_IRQ_VBLANK_PENDING) {
41650- atomic_inc(&dev_priv->vbl_received);
41651- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41652+ atomic_inc_unchecked(&dev_priv->vbl_received);
41653+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41654 do_gettimeofday(&cur_vblank);
41655 if (dev_priv->last_vblank_valid) {
41656 dev_priv->usec_per_vblank =
41657@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41658 dev_priv->last_vblank = cur_vblank;
41659 dev_priv->last_vblank_valid = 1;
41660 }
41661- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41662+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41663 DRM_DEBUG("US per vblank is: %u\n",
41664 dev_priv->usec_per_vblank);
41665 }
41666@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41667
41668 for (i = 0; i < dev_priv->num_irqs; ++i) {
41669 if (status & cur_irq->pending_mask) {
41670- atomic_inc(&cur_irq->irq_received);
41671+ atomic_inc_unchecked(&cur_irq->irq_received);
41672 wake_up(&cur_irq->irq_queue);
41673 handled = 1;
41674 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41675@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41676 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41677 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41678 masks[irq][4]));
41679- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41680+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41681 } else {
41682 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41683 (((cur_irq_sequence =
41684- atomic_read(&cur_irq->irq_received)) -
41685+ atomic_read_unchecked(&cur_irq->irq_received)) -
41686 *sequence) <= (1 << 23)));
41687 }
41688 *sequence = cur_irq_sequence;
41689@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41690 }
41691
41692 for (i = 0; i < dev_priv->num_irqs; ++i) {
41693- atomic_set(&cur_irq->irq_received, 0);
41694+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41695 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41696 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41697 init_waitqueue_head(&cur_irq->irq_queue);
41698@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41699 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41700 case VIA_IRQ_RELATIVE:
41701 irqwait->request.sequence +=
41702- atomic_read(&cur_irq->irq_received);
41703+ atomic_read_unchecked(&cur_irq->irq_received);
41704 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41705 case VIA_IRQ_ABSOLUTE:
41706 break;
41707diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41708index d26a6da..5fa41ed 100644
41709--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41710+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41711@@ -447,7 +447,7 @@ struct vmw_private {
41712 * Fencing and IRQs.
41713 */
41714
41715- atomic_t marker_seq;
41716+ atomic_unchecked_t marker_seq;
41717 wait_queue_head_t fence_queue;
41718 wait_queue_head_t fifo_queue;
41719 spinlock_t waiter_lock;
41720diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41721index 39f2b03..d1b0a64 100644
41722--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41723+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41724@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41725 (unsigned int) min,
41726 (unsigned int) fifo->capabilities);
41727
41728- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41729+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41730 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41731 vmw_marker_queue_init(&fifo->marker_queue);
41732 return vmw_fifo_send_fence(dev_priv, &dummy);
41733@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41734 if (reserveable)
41735 iowrite32(bytes, fifo_mem +
41736 SVGA_FIFO_RESERVED);
41737- return fifo_mem + (next_cmd >> 2);
41738+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41739 } else {
41740 need_bounce = true;
41741 }
41742@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41743
41744 fm = vmw_fifo_reserve(dev_priv, bytes);
41745 if (unlikely(fm == NULL)) {
41746- *seqno = atomic_read(&dev_priv->marker_seq);
41747+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41748 ret = -ENOMEM;
41749 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41750 false, 3*HZ);
41751@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41752 }
41753
41754 do {
41755- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41756+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41757 } while (*seqno == 0);
41758
41759 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41760diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41761index 170b61b..fec7348 100644
41762--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41763+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41764@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41765 }
41766
41767 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41768- vmw_gmrid_man_init,
41769- vmw_gmrid_man_takedown,
41770- vmw_gmrid_man_get_node,
41771- vmw_gmrid_man_put_node,
41772- vmw_gmrid_man_debug
41773+ .init = vmw_gmrid_man_init,
41774+ .takedown = vmw_gmrid_man_takedown,
41775+ .get_node = vmw_gmrid_man_get_node,
41776+ .put_node = vmw_gmrid_man_put_node,
41777+ .debug = vmw_gmrid_man_debug
41778 };
41779diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41780index 69c8ce2..cacb0ab 100644
41781--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41782+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41783@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41784 int ret;
41785
41786 num_clips = arg->num_clips;
41787- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41788+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41789
41790 if (unlikely(num_clips == 0))
41791 return 0;
41792@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41793 int ret;
41794
41795 num_clips = arg->num_clips;
41796- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41797+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41798
41799 if (unlikely(num_clips == 0))
41800 return 0;
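The vmwgfx ioctl hunks add the __user address-space tag when turning the 64-bit clips_ptr handle into a pointer, so sparse will reject any direct dereference of it. A model of what such an annotation expands to under sparse; __user_demo is an illustrative name, not the kernel's macro:

#ifdef __CHECKER__
# define __user_demo __attribute__((noderef, address_space(1)))
#else
# define __user_demo
#endif

struct rect_demo { int x, y, w, h; };

static struct rect_demo __user_demo *clips_from_handle(unsigned long long handle)
{
        /* The cast chain mirrors the patch: u64 -> unsigned long ->
         * tagged pointer.  Dereferencing the result directly is now a
         * sparse error; access must go through a copy_from_user()-style
         * accessor. */
        return (struct rect_demo __user_demo *)(unsigned long)handle;
}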
41801diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41802index 9fe9827..0aa2fc0 100644
41803--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41804+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41805@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41806 * emitted. Then the fence is stale and signaled.
41807 */
41808
41809- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41810+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41811 > VMW_FENCE_WRAP);
41812
41813 return ret;
41814@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41815
41816 if (fifo_idle)
41817 down_read(&fifo_state->rwsem);
41818- signal_seq = atomic_read(&dev_priv->marker_seq);
41819+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41820 ret = 0;
41821
41822 for (;;) {
41823diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41824index efd1ffd..0ae13ca 100644
41825--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41826+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41827@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41828 while (!vmw_lag_lt(queue, us)) {
41829 spin_lock(&queue->lock);
41830 if (list_empty(&queue->head))
41831- seqno = atomic_read(&dev_priv->marker_seq);
41832+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41833 else {
41834 marker = list_first_entry(&queue->head,
41835 struct vmw_marker, head);
41836diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41837index 37ac7b5..d52a5c9 100644
41838--- a/drivers/gpu/vga/vga_switcheroo.c
41839+++ b/drivers/gpu/vga/vga_switcheroo.c
41840@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41841
41842 /* this version is for the case where the power switch is separate
41843 to the device being powered down. */
41844-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41845+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41846 {
41847 /* copy over all the bus versions */
41848 if (dev->bus && dev->bus->pm) {
41849@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41850 return ret;
41851 }
41852
41853-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41854+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41855 {
41856 /* copy over all the bus versions */
41857 if (dev->bus && dev->bus->pm) {
41858diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41859index 8b63879..a5a5e72 100644
41860--- a/drivers/hid/hid-core.c
41861+++ b/drivers/hid/hid-core.c
41862@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41863
41864 int hid_add_device(struct hid_device *hdev)
41865 {
41866- static atomic_t id = ATOMIC_INIT(0);
41867+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41868 int ret;
41869
41870 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41871@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41872 /* XXX hack, any other cleaner solution after the driver core
41873 * is converted to allow more than 20 bytes as the device name? */
41874 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41875- hdev->vendor, hdev->product, atomic_inc_return(&id));
41876+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41877
41878 hid_debug_register(hdev, dev_name(&hdev->dev));
41879 ret = device_add(&hdev->dev);
41880diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41881index 5bc6d80..e47b55a 100644
41882--- a/drivers/hid/hid-logitech-dj.c
41883+++ b/drivers/hid/hid-logitech-dj.c
41884@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41885 * case we forward it to the correct hid device (via hid_input_report()
41886 * ) and return 1 so hid-core does not anything else with it.
41886 * ) and return 1 so hid-core does not do anything else with it.
41887 */
41888+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41889+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41890+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41891+ __func__, dj_report->device_index);
41892+ return false;
41893+ }
41894
41895 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41896 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41897diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41898index c13fb5b..55a3802 100644
41899--- a/drivers/hid/hid-wiimote-debug.c
41900+++ b/drivers/hid/hid-wiimote-debug.c
41901@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41902 else if (size == 0)
41903 return -EIO;
41904
41905- if (copy_to_user(u, buf, size))
41906+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41907 return -EFAULT;
41908
41909 *off += size;
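The wiimote debugfs hunk refuses any read size larger than the on-stack buffer before calling copy_to_user(), closing a potential stack disclosure. A sketch of the bounds-then-copy rule, with copy_out() standing in for copy_to_user():

#include <string.h>
#include <errno.h>
#include <sys/types.h>

/* copy_out() stands in for copy_to_user(): nonzero means fault. */
static int copy_out(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

static ssize_t eeprom_read_demo(void *user_buf, size_t size)
{
        char buf[16] = "sample payload";        /* fixed kernel-side buffer */

        /* Reject before copying: a size larger than the buffer would
         * otherwise leak adjacent stack memory to the caller. */
        if (size > sizeof(buf) || copy_out(user_buf, buf, size))
                return -EFAULT;
        return (ssize_t)size;
}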
41910diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41911index 433f72a..2926005 100644
41912--- a/drivers/hv/channel.c
41913+++ b/drivers/hv/channel.c
41914@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41915 unsigned long flags;
41916 int ret = 0;
41917
41918- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41919- atomic_inc(&vmbus_connection.next_gpadl_handle);
41920+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41921+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41922
41923 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41924 if (ret)
41925diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41926index 3e4235c..877d0e5 100644
41927--- a/drivers/hv/hv.c
41928+++ b/drivers/hv/hv.c
41929@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41930 u64 output_address = (output) ? virt_to_phys(output) : 0;
41931 u32 output_address_hi = output_address >> 32;
41932 u32 output_address_lo = output_address & 0xFFFFFFFF;
41933- void *hypercall_page = hv_context.hypercall_page;
41934+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41935
41936 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41937 "=a"(hv_status_lo) : "d" (control_hi),
41938@@ -156,7 +156,7 @@ int hv_init(void)
41939 /* See if the hypercall page is already set */
41940 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
41941
41942- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
41943+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
41944
41945 if (!virtaddr)
41946 goto cleanup;
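The hv.c pair enforces W^X on the hypercall page: it is allocated PAGE_KERNEL_RX (read + execute) instead of PAGE_KERNEL_EXEC (writable + execute), and the call site reaches it through the ktva_ktla() alias. A userspace model of the write-then-seal sequence, assuming a 4 KiB page:

#include <string.h>
#include <sys/mman.h>

static void *make_exec_page(const void *code, size_t len)
{
        void *page;

        if (len > 4096)
                return NULL;
        page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return NULL;
        memcpy(page, code, len);        /* populate while still writable */
        if (mprotect(page, 4096, PROT_READ | PROT_EXEC)) {      /* seal: W^X */
                munmap(page, 4096);
                return NULL;
        }
        return page;
}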
41947diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41948index b958ded..b2452bb 100644
41949--- a/drivers/hv/hv_balloon.c
41950+++ b/drivers/hv/hv_balloon.c
41951@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41952
41953 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41954 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41955-static atomic_t trans_id = ATOMIC_INIT(0);
41956+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41957
41958 static int dm_ring_size = (5 * PAGE_SIZE);
41959
41960@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
41961 pr_info("Memory hot add failed\n");
41962
41963 dm->state = DM_INITIALIZED;
41964- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41965+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41966 vmbus_sendpacket(dm->dev->channel, &resp,
41967 sizeof(struct dm_hot_add_response),
41968 (unsigned long)NULL,
41969@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
41970 memset(&status, 0, sizeof(struct dm_status));
41971 status.hdr.type = DM_STATUS_REPORT;
41972 status.hdr.size = sizeof(struct dm_status);
41973- status.hdr.trans_id = atomic_inc_return(&trans_id);
41974+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41975
41976 /*
41977 * The host expects the guest to report free memory.
41978@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
41979 * send the status. This can happen if we were interrupted
41980 * after we picked our transaction ID.
41981 */
41982- if (status.hdr.trans_id != atomic_read(&trans_id))
41983+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41984 return;
41985
41986 /*
41987@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
41988 */
41989
41990 do {
41991- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41992+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41993 ret = vmbus_sendpacket(dm_device.dev->channel,
41994 bl_resp,
41995 bl_resp->hdr.size,
41996@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41997
41998 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41999 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42000- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42001+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42002 resp.hdr.size = sizeof(struct dm_unballoon_response);
42003
42004 vmbus_sendpacket(dm_device.dev->channel, &resp,
42005@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42006 memset(&version_req, 0, sizeof(struct dm_version_request));
42007 version_req.hdr.type = DM_VERSION_REQUEST;
42008 version_req.hdr.size = sizeof(struct dm_version_request);
42009- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42010+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42011 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42012 version_req.is_last_attempt = 1;
42013
42014@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
42015 memset(&version_req, 0, sizeof(struct dm_version_request));
42016 version_req.hdr.type = DM_VERSION_REQUEST;
42017 version_req.hdr.size = sizeof(struct dm_version_request);
42018- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42019+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42020 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42021 version_req.is_last_attempt = 0;
42022
42023@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
42024 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42025 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42026 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42027- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42028+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42029
42030 cap_msg.caps.cap_bits.balloon = 1;
42031 cap_msg.caps.cap_bits.hot_add = 1;
42032diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42033index c386d8d..d6004c4 100644
42034--- a/drivers/hv/hyperv_vmbus.h
42035+++ b/drivers/hv/hyperv_vmbus.h
42036@@ -611,7 +611,7 @@ enum vmbus_connect_state {
42037 struct vmbus_connection {
42038 enum vmbus_connect_state conn_state;
42039
42040- atomic_t next_gpadl_handle;
42041+ atomic_unchecked_t next_gpadl_handle;
42042
42043 /*
42044 * Represents channel interrupts. Each bit position represents a
42045diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42046index 4d6b269..2e23b86 100644
42047--- a/drivers/hv/vmbus_drv.c
42048+++ b/drivers/hv/vmbus_drv.c
42049@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42050 {
42051 int ret = 0;
42052
42053- static atomic_t device_num = ATOMIC_INIT(0);
42054+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42055
42056 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42057- atomic_inc_return(&device_num));
42058+ atomic_inc_return_unchecked(&device_num));
42059
42060 child_device_obj->device.bus = &hv_bus;
42061 child_device_obj->device.parent = &hv_acpi_dev->dev;
42062diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42063index 579bdf9..75118b5 100644
42064--- a/drivers/hwmon/acpi_power_meter.c
42065+++ b/drivers/hwmon/acpi_power_meter.c
42066@@ -116,7 +116,7 @@ struct sensor_template {
42067 struct device_attribute *devattr,
42068 const char *buf, size_t count);
42069 int index;
42070-};
42071+} __do_const;
42072
42073 /* Averaging interval */
42074 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42075@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42076 struct sensor_template *attrs)
42077 {
42078 struct device *dev = &resource->acpi_dev->dev;
42079- struct sensor_device_attribute *sensors =
42080+ sensor_device_attribute_no_const *sensors =
42081 &resource->sensors[resource->num_sensors];
42082 int res = 0;
42083
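Throughout the hwmon hunks that follow, attribute structures built at runtime switch to *_no_const typedefs: once the constify plugin makes the default struct types const, drivers that legitimately fill attributes late need a writable variant. A loose model with illustrative names; the real typedefs come from the patch's compiler-plugin headers, which are not shown here:

struct dev_attr_demo {
        const char *name;
        int (*show)(char *buf);
};

typedef struct dev_attr_demo dev_attr_demo_no_const;    /* writable variant */

static int temp_show(char *buf) { buf[0] = '\0'; return 0; }

static void init_attr(dev_attr_demo_no_const *a, const char *name)
{
        a->name = name;         /* legal: runtime-constructed attribute */
        a->show = temp_show;
}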
42084diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42085index 0af63da..05a183a 100644
42086--- a/drivers/hwmon/applesmc.c
42087+++ b/drivers/hwmon/applesmc.c
42088@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42089 {
42090 struct applesmc_node_group *grp;
42091 struct applesmc_dev_attr *node;
42092- struct attribute *attr;
42093+ attribute_no_const *attr;
42094 int ret, i;
42095
42096 for (grp = groups; grp->format; grp++) {
42097diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42098index cccef87..06ce8ec 100644
42099--- a/drivers/hwmon/asus_atk0110.c
42100+++ b/drivers/hwmon/asus_atk0110.c
42101@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42102 struct atk_sensor_data {
42103 struct list_head list;
42104 struct atk_data *data;
42105- struct device_attribute label_attr;
42106- struct device_attribute input_attr;
42107- struct device_attribute limit1_attr;
42108- struct device_attribute limit2_attr;
42109+ device_attribute_no_const label_attr;
42110+ device_attribute_no_const input_attr;
42111+ device_attribute_no_const limit1_attr;
42112+ device_attribute_no_const limit2_attr;
42113 char label_attr_name[ATTR_NAME_SIZE];
42114 char input_attr_name[ATTR_NAME_SIZE];
42115 char limit1_attr_name[ATTR_NAME_SIZE];
42116@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42117 static struct device_attribute atk_name_attr =
42118 __ATTR(name, 0444, atk_name_show, NULL);
42119
42120-static void atk_init_attribute(struct device_attribute *attr, char *name,
42121+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42122 sysfs_show_func show)
42123 {
42124 sysfs_attr_init(&attr->attr);
42125diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42126index 5b7fec8..05c957a 100644
42127--- a/drivers/hwmon/coretemp.c
42128+++ b/drivers/hwmon/coretemp.c
42129@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42130 return NOTIFY_OK;
42131 }
42132
42133-static struct notifier_block coretemp_cpu_notifier __refdata = {
42134+static struct notifier_block coretemp_cpu_notifier = {
42135 .notifier_call = coretemp_cpu_callback,
42136 };
42137
42138diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42139index 7a8a6fb..015c1fd 100644
42140--- a/drivers/hwmon/ibmaem.c
42141+++ b/drivers/hwmon/ibmaem.c
42142@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42143 struct aem_rw_sensor_template *rw)
42144 {
42145 struct device *dev = &data->pdev->dev;
42146- struct sensor_device_attribute *sensors = data->sensors;
42147+ sensor_device_attribute_no_const *sensors = data->sensors;
42148 int err;
42149
42150 /* Set up read-only sensors */
42151diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42152index 17ae2eb..21b71dd 100644
42153--- a/drivers/hwmon/iio_hwmon.c
42154+++ b/drivers/hwmon/iio_hwmon.c
42155@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42156 {
42157 struct device *dev = &pdev->dev;
42158 struct iio_hwmon_state *st;
42159- struct sensor_device_attribute *a;
42160+ sensor_device_attribute_no_const *a;
42161 int ret, i;
42162 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42163 enum iio_chan_type type;
42164diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42165index f3830db..9f4d6d5 100644
42166--- a/drivers/hwmon/nct6683.c
42167+++ b/drivers/hwmon/nct6683.c
42168@@ -397,11 +397,11 @@ static struct attribute_group *
42169 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42170 int repeat)
42171 {
42172- struct sensor_device_attribute_2 *a2;
42173- struct sensor_device_attribute *a;
42174+ sensor_device_attribute_2_no_const *a2;
42175+ sensor_device_attribute_no_const *a;
42176 struct sensor_device_template **t;
42177 struct sensor_device_attr_u *su;
42178- struct attribute_group *group;
42179+ attribute_group_no_const *group;
42180 struct attribute **attrs;
42181 int i, j, count;
42182
42183diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42184index 1be4117..88ae1e1 100644
42185--- a/drivers/hwmon/nct6775.c
42186+++ b/drivers/hwmon/nct6775.c
42187@@ -952,10 +952,10 @@ static struct attribute_group *
42188 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42189 int repeat)
42190 {
42191- struct attribute_group *group;
42192+ attribute_group_no_const *group;
42193 struct sensor_device_attr_u *su;
42194- struct sensor_device_attribute *a;
42195- struct sensor_device_attribute_2 *a2;
42196+ sensor_device_attribute_no_const *a;
42197+ sensor_device_attribute_2_no_const *a2;
42198 struct attribute **attrs;
42199 struct sensor_device_template **t;
42200 int i, count;
42201diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42202index f2e47c7..45d7941 100644
42203--- a/drivers/hwmon/pmbus/pmbus_core.c
42204+++ b/drivers/hwmon/pmbus/pmbus_core.c
42205@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42206 return 0;
42207 }
42208
42209-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42210+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42211 const char *name,
42212 umode_t mode,
42213 ssize_t (*show)(struct device *dev,
42214@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42215 dev_attr->store = store;
42216 }
42217
42218-static void pmbus_attr_init(struct sensor_device_attribute *a,
42219+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42220 const char *name,
42221 umode_t mode,
42222 ssize_t (*show)(struct device *dev,
42223@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42224 u16 reg, u8 mask)
42225 {
42226 struct pmbus_boolean *boolean;
42227- struct sensor_device_attribute *a;
42228+ sensor_device_attribute_no_const *a;
42229
42230 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42231 if (!boolean)
42232@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42233 bool update, bool readonly)
42234 {
42235 struct pmbus_sensor *sensor;
42236- struct device_attribute *a;
42237+ device_attribute_no_const *a;
42238
42239 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42240 if (!sensor)
42241@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42242 const char *lstring, int index)
42243 {
42244 struct pmbus_label *label;
42245- struct device_attribute *a;
42246+ device_attribute_no_const *a;
42247
42248 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42249 if (!label)
42250diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42251index d4f0935..7420593 100644
42252--- a/drivers/hwmon/sht15.c
42253+++ b/drivers/hwmon/sht15.c
42254@@ -169,7 +169,7 @@ struct sht15_data {
42255 int supply_uv;
42256 bool supply_uv_valid;
42257 struct work_struct update_supply_work;
42258- atomic_t interrupt_handled;
42259+ atomic_unchecked_t interrupt_handled;
42260 };
42261
42262 /**
42263@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42264 ret = gpio_direction_input(data->pdata->gpio_data);
42265 if (ret)
42266 return ret;
42267- atomic_set(&data->interrupt_handled, 0);
42268+ atomic_set_unchecked(&data->interrupt_handled, 0);
42269
42270 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42271 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42272 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42273 /* Only relevant if the interrupt hasn't occurred. */
42274- if (!atomic_read(&data->interrupt_handled))
42275+ if (!atomic_read_unchecked(&data->interrupt_handled))
42276 schedule_work(&data->read_work);
42277 }
42278 ret = wait_event_timeout(data->wait_queue,
42279@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42280
42281 /* First disable the interrupt */
42282 disable_irq_nosync(irq);
42283- atomic_inc(&data->interrupt_handled);
42284+ atomic_inc_unchecked(&data->interrupt_handled);
42285 /* Then schedule a reading work struct */
42286 if (data->state != SHT15_READING_NOTHING)
42287 schedule_work(&data->read_work);
42288@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42289 * If not, then start the interrupt again - care here as could
42290 * have gone low in meantime so verify it hasn't!
42291 */
42292- atomic_set(&data->interrupt_handled, 0);
42293+ atomic_set_unchecked(&data->interrupt_handled, 0);
42294 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42295 /* If still not occurred or another handler was scheduled */
42296 if (gpio_get_value(data->pdata->gpio_data)
42297- || atomic_read(&data->interrupt_handled))
42298+ || atomic_read_unchecked(&data->interrupt_handled))
42299 return;
42300 }
42301
42302diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42303index ac91c07..8e69663 100644
42304--- a/drivers/hwmon/via-cputemp.c
42305+++ b/drivers/hwmon/via-cputemp.c
42306@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42307 return NOTIFY_OK;
42308 }
42309
42310-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42311+static struct notifier_block via_cputemp_cpu_notifier = {
42312 .notifier_call = via_cputemp_cpu_callback,
42313 };
42314
42315diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42316index 65e3240..e6c511d 100644
42317--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42318+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42319@@ -39,7 +39,7 @@
42320 extern struct i2c_adapter amd756_smbus;
42321
42322 static struct i2c_adapter *s4882_adapter;
42323-static struct i2c_algorithm *s4882_algo;
42324+static i2c_algorithm_no_const *s4882_algo;
42325
42326 /* Wrapper access functions for multiplexed SMBus */
42327 static DEFINE_MUTEX(amd756_lock);
42328diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42329index b19a310..d6eece0 100644
42330--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42331+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42332@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42333 /* usb layer */
42334
42335 /* Send command to device, and get response. */
42336-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42337+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42338 {
42339 int ret = 0;
42340 int actual;
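diolan_usb_transfer() gains __intentional_overflow(-1), an annotation for the patch's size_overflow plugin marking the function's arithmetic as allowed to wrap so it is not instrumented. A guess at the no-plugin fallback shape (the macro name is real in this patch; the fallback body here is illustrative):

#ifndef __intentional_overflow
# define __intentional_overflow(...)    /* no-op outside the plugin build */
#endif

static unsigned __intentional_overflow(-1) wrap_ok(unsigned a, unsigned b)
{
        return a + b;   /* may wrap; the plugin is told not to flag it */
}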
42341diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42342index 88eda09..cf40434 100644
42343--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42344+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42345@@ -37,7 +37,7 @@
42346 extern struct i2c_adapter *nforce2_smbus;
42347
42348 static struct i2c_adapter *s4985_adapter;
42349-static struct i2c_algorithm *s4985_algo;
42350+static i2c_algorithm_no_const *s4985_algo;
42351
42352 /* Wrapper access functions for multiplexed SMBus */
42353 static DEFINE_MUTEX(nforce2_lock);
42354diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42355index 71c7a39..71dd3e0 100644
42356--- a/drivers/i2c/i2c-dev.c
42357+++ b/drivers/i2c/i2c-dev.c
42358@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42359 break;
42360 }
42361
42362- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42363+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42364 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42365 if (IS_ERR(rdwr_pa[i].buf)) {
42366 res = PTR_ERR(rdwr_pa[i].buf);
42367diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42368index 0b510ba..4fbb5085 100644
42369--- a/drivers/ide/ide-cd.c
42370+++ b/drivers/ide/ide-cd.c
42371@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42372 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42373 if ((unsigned long)buf & alignment
42374 || blk_rq_bytes(rq) & q->dma_pad_mask
42375- || object_is_on_stack(buf))
42376+ || object_starts_on_stack(buf))
42377 drive->dma = 0;
42378 }
42379 }
42380diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42381index af3e76d..96dfe5e 100644
42382--- a/drivers/iio/industrialio-core.c
42383+++ b/drivers/iio/industrialio-core.c
42384@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42385 }
42386
42387 static
42388-int __iio_device_attr_init(struct device_attribute *dev_attr,
42389+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42390 const char *postfix,
42391 struct iio_chan_spec const *chan,
42392 ssize_t (*readfunc)(struct device *dev,
42393diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42394index e28a494..f7c2671 100644
42395--- a/drivers/infiniband/core/cm.c
42396+++ b/drivers/infiniband/core/cm.c
42397@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42398
42399 struct cm_counter_group {
42400 struct kobject obj;
42401- atomic_long_t counter[CM_ATTR_COUNT];
42402+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42403 };
42404
42405 struct cm_counter_attribute {
42406@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42407 struct ib_mad_send_buf *msg = NULL;
42408 int ret;
42409
42410- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42411+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42412 counter[CM_REQ_COUNTER]);
42413
42414 /* Quick state check to discard duplicate REQs. */
42415@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42416 if (!cm_id_priv)
42417 return;
42418
42419- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42420+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42421 counter[CM_REP_COUNTER]);
42422 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42423 if (ret)
42424@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42425 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42426 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42427 spin_unlock_irq(&cm_id_priv->lock);
42428- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42429+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42430 counter[CM_RTU_COUNTER]);
42431 goto out;
42432 }
42433@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42434 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42435 dreq_msg->local_comm_id);
42436 if (!cm_id_priv) {
42437- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42438+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42439 counter[CM_DREQ_COUNTER]);
42440 cm_issue_drep(work->port, work->mad_recv_wc);
42441 return -EINVAL;
42442@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42443 case IB_CM_MRA_REP_RCVD:
42444 break;
42445 case IB_CM_TIMEWAIT:
42446- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42447+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42448 counter[CM_DREQ_COUNTER]);
42449 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42450 goto unlock;
42451@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42452 cm_free_msg(msg);
42453 goto deref;
42454 case IB_CM_DREQ_RCVD:
42455- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42456+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42457 counter[CM_DREQ_COUNTER]);
42458 goto unlock;
42459 default:
42460@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42461 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42462 cm_id_priv->msg, timeout)) {
42463 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42464- atomic_long_inc(&work->port->
42465+ atomic_long_inc_unchecked(&work->port->
42466 counter_group[CM_RECV_DUPLICATES].
42467 counter[CM_MRA_COUNTER]);
42468 goto out;
42469@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42470 break;
42471 case IB_CM_MRA_REQ_RCVD:
42472 case IB_CM_MRA_REP_RCVD:
42473- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42474+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42475 counter[CM_MRA_COUNTER]);
42476 /* fall through */
42477 default:
42478@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42479 case IB_CM_LAP_IDLE:
42480 break;
42481 case IB_CM_MRA_LAP_SENT:
42482- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42483+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42484 counter[CM_LAP_COUNTER]);
42485 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42486 goto unlock;
42487@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42488 cm_free_msg(msg);
42489 goto deref;
42490 case IB_CM_LAP_RCVD:
42491- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42492+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42493 counter[CM_LAP_COUNTER]);
42494 goto unlock;
42495 default:
42496@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42497 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42498 if (cur_cm_id_priv) {
42499 spin_unlock_irq(&cm.lock);
42500- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42501+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42502 counter[CM_SIDR_REQ_COUNTER]);
42503 goto out; /* Duplicate message. */
42504 }
42505@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42506 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42507 msg->retries = 1;
42508
42509- atomic_long_add(1 + msg->retries,
42510+ atomic_long_add_unchecked(1 + msg->retries,
42511 &port->counter_group[CM_XMIT].counter[attr_index]);
42512 if (msg->retries)
42513- atomic_long_add(msg->retries,
42514+ atomic_long_add_unchecked(msg->retries,
42515 &port->counter_group[CM_XMIT_RETRIES].
42516 counter[attr_index]);
42517
42518@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42519 }
42520
42521 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42522- atomic_long_inc(&port->counter_group[CM_RECV].
42523+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42524 counter[attr_id - CM_ATTR_ID_OFFSET]);
42525
42526 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42527@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42528 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42529
42530 return sprintf(buf, "%ld\n",
42531- atomic_long_read(&group->counter[cm_attr->index]));
42532+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42533 }
42534
42535 static const struct sysfs_ops cm_counter_ops = {
42536diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42537index 9f5ad7c..588cd84 100644
42538--- a/drivers/infiniband/core/fmr_pool.c
42539+++ b/drivers/infiniband/core/fmr_pool.c
42540@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42541
42542 struct task_struct *thread;
42543
42544- atomic_t req_ser;
42545- atomic_t flush_ser;
42546+ atomic_unchecked_t req_ser;
42547+ atomic_unchecked_t flush_ser;
42548
42549 wait_queue_head_t force_wait;
42550 };
42551@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42552 struct ib_fmr_pool *pool = pool_ptr;
42553
42554 do {
42555- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42556+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42557 ib_fmr_batch_release(pool);
42558
42559- atomic_inc(&pool->flush_ser);
42560+ atomic_inc_unchecked(&pool->flush_ser);
42561 wake_up_interruptible(&pool->force_wait);
42562
42563 if (pool->flush_function)
42564@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42565 }
42566
42567 set_current_state(TASK_INTERRUPTIBLE);
42568- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42569+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42570 !kthread_should_stop())
42571 schedule();
42572 __set_current_state(TASK_RUNNING);
42573@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42574 pool->dirty_watermark = params->dirty_watermark;
42575 pool->dirty_len = 0;
42576 spin_lock_init(&pool->pool_lock);
42577- atomic_set(&pool->req_ser, 0);
42578- atomic_set(&pool->flush_ser, 0);
42579+ atomic_set_unchecked(&pool->req_ser, 0);
42580+ atomic_set_unchecked(&pool->flush_ser, 0);
42581 init_waitqueue_head(&pool->force_wait);
42582
42583 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42584@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42585 }
42586 spin_unlock_irq(&pool->pool_lock);
42587
42588- serial = atomic_inc_return(&pool->req_ser);
42589+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42590 wake_up_process(pool->thread);
42591
42592 if (wait_event_interruptible(pool->force_wait,
42593- atomic_read(&pool->flush_ser) - serial >= 0))
42594+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42595 return -EINTR;
42596
42597 return 0;
42598@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42599 } else {
42600 list_add_tail(&fmr->list, &pool->dirty_list);
42601 if (++pool->dirty_len >= pool->dirty_watermark) {
42602- atomic_inc(&pool->req_ser);
42603+ atomic_inc_unchecked(&pool->req_ser);
42604 wake_up_process(pool->thread);
42605 }
42606 }
42607diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
42608index aec7a6a..8c014b5 100644
42609--- a/drivers/infiniband/core/umem.c
42610+++ b/drivers/infiniband/core/umem.c
42611@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
42612 if (dmasync)
42613 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
42614
42615+ /*
42616+ * If the combination of the addr and size requested for this memory
42617+ * region causes an integer overflow, return error.
42618+ */
42619+ if ((PAGE_ALIGN(addr + size) <= size) ||
42620+ (PAGE_ALIGN(addr + size) <= addr))
42621+ return ERR_PTR(-EINVAL);
42622+
42623 if (!can_do_mlock())
42624 return ERR_PTR(-EPERM);
42625
42626diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
42627index 6c52e72..6303e3f 100644
42628--- a/drivers/infiniband/core/uverbs_cmd.c
42629+++ b/drivers/infiniband/core/uverbs_cmd.c
42630@@ -945,6 +945,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
42631 if (copy_from_user(&cmd, buf, sizeof cmd))
42632 return -EFAULT;
42633
42634+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
42635+ return -EFAULT;
42636+
42637 INIT_UDATA(&udata, buf + sizeof cmd,
42638 (unsigned long) cmd.response + sizeof resp,
42639 in_len - sizeof cmd, out_len - sizeof resp);
42640diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42641index cb43c22..2e12dd7 100644
42642--- a/drivers/infiniband/hw/cxgb4/mem.c
42643+++ b/drivers/infiniband/hw/cxgb4/mem.c
42644@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42645 int err;
42646 struct fw_ri_tpte tpt;
42647 u32 stag_idx;
42648- static atomic_t key;
42649+ static atomic_unchecked_t key;
42650
42651 if (c4iw_fatal_error(rdev))
42652 return -EIO;
42653@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42654 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42655 rdev->stats.stag.max = rdev->stats.stag.cur;
42656 mutex_unlock(&rdev->stats.lock);
42657- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42658+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42659 }
42660 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42661 __func__, stag_state, type, pdid, stag_idx);
42662diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42663index 79b3dbc..96e5fcc 100644
42664--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42665+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42666@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42667 struct ib_atomic_eth *ateth;
42668 struct ipath_ack_entry *e;
42669 u64 vaddr;
42670- atomic64_t *maddr;
42671+ atomic64_unchecked_t *maddr;
42672 u64 sdata;
42673 u32 rkey;
42674 u8 next;
42675@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42676 IB_ACCESS_REMOTE_ATOMIC)))
42677 goto nack_acc_unlck;
42678 /* Perform atomic OP and save result. */
42679- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42680+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42681 sdata = be64_to_cpu(ateth->swap_data);
42682 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42683 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42684- (u64) atomic64_add_return(sdata, maddr) - sdata :
42685+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42686 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42687 be64_to_cpu(ateth->compare_data),
42688 sdata);
42689diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42690index 1f95bba..9530f87 100644
42691--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42692+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42693@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42694 unsigned long flags;
42695 struct ib_wc wc;
42696 u64 sdata;
42697- atomic64_t *maddr;
42698+ atomic64_unchecked_t *maddr;
42699 enum ib_wc_status send_status;
42700
42701 /*
42702@@ -382,11 +382,11 @@ again:
42703 IB_ACCESS_REMOTE_ATOMIC)))
42704 goto acc_err;
42705 /* Perform atomic OP and save result. */
42706- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42707+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42708 sdata = wqe->wr.wr.atomic.compare_add;
42709 *(u64 *) sqp->s_sge.sge.vaddr =
42710 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42711- (u64) atomic64_add_return(sdata, maddr) - sdata :
42712+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42713 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42714 sdata, wqe->wr.wr.atomic.swap);
42715 goto send_comp;
42716diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42717index 82a7dd8..8fb6ba6 100644
42718--- a/drivers/infiniband/hw/mlx4/mad.c
42719+++ b/drivers/infiniband/hw/mlx4/mad.c
42720@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42721
42722 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42723 {
42724- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42725+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42726 cpu_to_be64(0xff00000000000000LL);
42727 }
42728
42729diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42730index ed327e6..ca1739e0 100644
42731--- a/drivers/infiniband/hw/mlx4/mcg.c
42732+++ b/drivers/infiniband/hw/mlx4/mcg.c
42733@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42734 {
42735 char name[20];
42736
42737- atomic_set(&ctx->tid, 0);
42738+ atomic_set_unchecked(&ctx->tid, 0);
42739 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42740 ctx->mcg_wq = create_singlethread_workqueue(name);
42741 if (!ctx->mcg_wq)
42742diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42743index 6eb743f..a7b0f6d 100644
42744--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42745+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42746@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42747 struct list_head mcg_mgid0_list;
42748 struct workqueue_struct *mcg_wq;
42749 struct mlx4_ib_demux_pv_ctx **tun;
42750- atomic_t tid;
42751+ atomic_unchecked_t tid;
42752 int flushing; /* flushing the work queue */
42753 };
42754
42755diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42756index 9d3e5c1..6f166df 100644
42757--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42758+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42759@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42760 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42761 }
42762
42763-int mthca_QUERY_FW(struct mthca_dev *dev)
42764+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42765 {
42766 struct mthca_mailbox *mailbox;
42767 u32 *outbox;
42768@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42769 CMD_TIME_CLASS_B);
42770 }
42771
42772-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42773+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42774 int num_mtt)
42775 {
42776 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42777@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42778 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42779 }
42780
42781-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42782+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42783 int eq_num)
42784 {
42785 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42786@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42787 CMD_TIME_CLASS_B);
42788 }
42789
42790-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42791+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42792 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42793 void *in_mad, void *response_mad)
42794 {
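The mthca firmware-command wrappers above gain __intentional_overflow(-1),
an annotation read by the size_overflow GCC plugin; the -1 argument marks
the whole function as exempt from arithmetic-overflow instrumentation. A
plausible wiring for such a marker, assuming the usual attribute-macro
pattern (the real definition lives in the patch's compiler headers):

/* Sketch (assumed): when the plugin builds the kernel, the marker becomes
 * a function attribute the plugin inspects; otherwise it must vanish. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

/* Usage, mirroring the hunks above: */
int __intentional_overflow(-1) demo_query_fw(void);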
42795diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42796index ded76c1..0cf0a08 100644
42797--- a/drivers/infiniband/hw/mthca/mthca_main.c
42798+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42799@@ -692,7 +692,7 @@ err_close:
42800 return err;
42801 }
42802
42803-static int mthca_setup_hca(struct mthca_dev *dev)
42804+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42805 {
42806 int err;
42807
42808diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42809index ed9a989..6aa5dc2 100644
42810--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42811+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42812@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42813 * through the bitmaps)
42814 */
42815
42816-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42817+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42818 {
42819 int o;
42820 int m;
42821@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42822 return key;
42823 }
42824
42825-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42826+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42827 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42828 {
42829 struct mthca_mailbox *mailbox;
42830@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42831 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42832 }
42833
42834-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42835+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42836 u64 *buffer_list, int buffer_size_shift,
42837 int list_len, u64 iova, u64 total_size,
42838 u32 access, struct mthca_mr *mr)
42839diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42840index 415f8e1..e34214e 100644
42841--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42842+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42843@@ -764,7 +764,7 @@ unlock:
42844 return 0;
42845 }
42846
42847-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42848+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42849 {
42850 struct mthca_dev *dev = to_mdev(ibcq->device);
42851 struct mthca_cq *cq = to_mcq(ibcq);
42852diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42853index 3b2a6dc..bce26ff 100644
42854--- a/drivers/infiniband/hw/nes/nes.c
42855+++ b/drivers/infiniband/hw/nes/nes.c
42856@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42857 LIST_HEAD(nes_adapter_list);
42858 static LIST_HEAD(nes_dev_list);
42859
42860-atomic_t qps_destroyed;
42861+atomic_unchecked_t qps_destroyed;
42862
42863 static unsigned int ee_flsh_adapter;
42864 static unsigned int sysfs_nonidx_addr;
42865@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42866 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42867 struct nes_adapter *nesadapter = nesdev->nesadapter;
42868
42869- atomic_inc(&qps_destroyed);
42870+ atomic_inc_unchecked(&qps_destroyed);
42871
42872 /* Free the control structures */
42873
42874diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42875index bd9d132..70d84f4 100644
42876--- a/drivers/infiniband/hw/nes/nes.h
42877+++ b/drivers/infiniband/hw/nes/nes.h
42878@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42879 extern unsigned int wqm_quanta;
42880 extern struct list_head nes_adapter_list;
42881
42882-extern atomic_t cm_connects;
42883-extern atomic_t cm_accepts;
42884-extern atomic_t cm_disconnects;
42885-extern atomic_t cm_closes;
42886-extern atomic_t cm_connecteds;
42887-extern atomic_t cm_connect_reqs;
42888-extern atomic_t cm_rejects;
42889-extern atomic_t mod_qp_timouts;
42890-extern atomic_t qps_created;
42891-extern atomic_t qps_destroyed;
42892-extern atomic_t sw_qps_destroyed;
42893+extern atomic_unchecked_t cm_connects;
42894+extern atomic_unchecked_t cm_accepts;
42895+extern atomic_unchecked_t cm_disconnects;
42896+extern atomic_unchecked_t cm_closes;
42897+extern atomic_unchecked_t cm_connecteds;
42898+extern atomic_unchecked_t cm_connect_reqs;
42899+extern atomic_unchecked_t cm_rejects;
42900+extern atomic_unchecked_t mod_qp_timouts;
42901+extern atomic_unchecked_t qps_created;
42902+extern atomic_unchecked_t qps_destroyed;
42903+extern atomic_unchecked_t sw_qps_destroyed;
42904 extern u32 mh_detected;
42905 extern u32 mh_pauses_sent;
42906 extern u32 cm_packets_sent;
42907@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42908 extern u32 cm_packets_received;
42909 extern u32 cm_packets_dropped;
42910 extern u32 cm_packets_retrans;
42911-extern atomic_t cm_listens_created;
42912-extern atomic_t cm_listens_destroyed;
42913+extern atomic_unchecked_t cm_listens_created;
42914+extern atomic_unchecked_t cm_listens_destroyed;
42915 extern u32 cm_backlog_drops;
42916-extern atomic_t cm_loopbacks;
42917-extern atomic_t cm_nodes_created;
42918-extern atomic_t cm_nodes_destroyed;
42919-extern atomic_t cm_accel_dropped_pkts;
42920-extern atomic_t cm_resets_recvd;
42921-extern atomic_t pau_qps_created;
42922-extern atomic_t pau_qps_destroyed;
42923+extern atomic_unchecked_t cm_loopbacks;
42924+extern atomic_unchecked_t cm_nodes_created;
42925+extern atomic_unchecked_t cm_nodes_destroyed;
42926+extern atomic_unchecked_t cm_accel_dropped_pkts;
42927+extern atomic_unchecked_t cm_resets_recvd;
42928+extern atomic_unchecked_t pau_qps_created;
42929+extern atomic_unchecked_t pau_qps_destroyed;
42930
42931 extern u32 int_mod_timer_init;
42932 extern u32 int_mod_cq_depth_256;
42933diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42934index 6f09a72..cf4399d 100644
42935--- a/drivers/infiniband/hw/nes/nes_cm.c
42936+++ b/drivers/infiniband/hw/nes/nes_cm.c
42937@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42938 u32 cm_packets_retrans;
42939 u32 cm_packets_created;
42940 u32 cm_packets_received;
42941-atomic_t cm_listens_created;
42942-atomic_t cm_listens_destroyed;
42943+atomic_unchecked_t cm_listens_created;
42944+atomic_unchecked_t cm_listens_destroyed;
42945 u32 cm_backlog_drops;
42946-atomic_t cm_loopbacks;
42947-atomic_t cm_nodes_created;
42948-atomic_t cm_nodes_destroyed;
42949-atomic_t cm_accel_dropped_pkts;
42950-atomic_t cm_resets_recvd;
42951+atomic_unchecked_t cm_loopbacks;
42952+atomic_unchecked_t cm_nodes_created;
42953+atomic_unchecked_t cm_nodes_destroyed;
42954+atomic_unchecked_t cm_accel_dropped_pkts;
42955+atomic_unchecked_t cm_resets_recvd;
42956
42957 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42958 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42959@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
42960 /* instance of function pointers for client API */
42961 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42962 static struct nes_cm_ops nes_cm_api = {
42963- mini_cm_accelerated,
42964- mini_cm_listen,
42965- mini_cm_del_listen,
42966- mini_cm_connect,
42967- mini_cm_close,
42968- mini_cm_accept,
42969- mini_cm_reject,
42970- mini_cm_recv_pkt,
42971- mini_cm_dealloc_core,
42972- mini_cm_get,
42973- mini_cm_set
42974+ .accelerated = mini_cm_accelerated,
42975+ .listen = mini_cm_listen,
42976+ .stop_listener = mini_cm_del_listen,
42977+ .connect = mini_cm_connect,
42978+ .close = mini_cm_close,
42979+ .accept = mini_cm_accept,
42980+ .reject = mini_cm_reject,
42981+ .recv_pkt = mini_cm_recv_pkt,
42982+ .destroy_cm_core = mini_cm_dealloc_core,
42983+ .get = mini_cm_get,
42984+ .set = mini_cm_set
42985 };
42986
42987 static struct nes_cm_core *g_cm_core;
42988
42989-atomic_t cm_connects;
42990-atomic_t cm_accepts;
42991-atomic_t cm_disconnects;
42992-atomic_t cm_closes;
42993-atomic_t cm_connecteds;
42994-atomic_t cm_connect_reqs;
42995-atomic_t cm_rejects;
42996+atomic_unchecked_t cm_connects;
42997+atomic_unchecked_t cm_accepts;
42998+atomic_unchecked_t cm_disconnects;
42999+atomic_unchecked_t cm_closes;
43000+atomic_unchecked_t cm_connecteds;
43001+atomic_unchecked_t cm_connect_reqs;
43002+atomic_unchecked_t cm_rejects;
43003
43004 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43005 {
43006@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43007 kfree(listener);
43008 listener = NULL;
43009 ret = 0;
43010- atomic_inc(&cm_listens_destroyed);
43011+ atomic_inc_unchecked(&cm_listens_destroyed);
43012 } else {
43013 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43014 }
43015@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43016 cm_node->rem_mac);
43017
43018 add_hte_node(cm_core, cm_node);
43019- atomic_inc(&cm_nodes_created);
43020+ atomic_inc_unchecked(&cm_nodes_created);
43021
43022 return cm_node;
43023 }
43024@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43025 }
43026
43027 atomic_dec(&cm_core->node_cnt);
43028- atomic_inc(&cm_nodes_destroyed);
43029+ atomic_inc_unchecked(&cm_nodes_destroyed);
43030 nesqp = cm_node->nesqp;
43031 if (nesqp) {
43032 nesqp->cm_node = NULL;
43033@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43034
43035 static void drop_packet(struct sk_buff *skb)
43036 {
43037- atomic_inc(&cm_accel_dropped_pkts);
43038+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43039 dev_kfree_skb_any(skb);
43040 }
43041
43042@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43043 {
43044
43045 int reset = 0; /* whether to send reset in case of err.. */
43046- atomic_inc(&cm_resets_recvd);
43047+ atomic_inc_unchecked(&cm_resets_recvd);
43048 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43049 " refcnt=%d\n", cm_node, cm_node->state,
43050 atomic_read(&cm_node->ref_count));
43051@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43052 rem_ref_cm_node(cm_node->cm_core, cm_node);
43053 return NULL;
43054 }
43055- atomic_inc(&cm_loopbacks);
43056+ atomic_inc_unchecked(&cm_loopbacks);
43057 loopbackremotenode->loopbackpartner = cm_node;
43058 loopbackremotenode->tcp_cntxt.rcv_wscale =
43059 NES_CM_DEFAULT_RCV_WND_SCALE;
43060@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43061 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43062 else {
43063 rem_ref_cm_node(cm_core, cm_node);
43064- atomic_inc(&cm_accel_dropped_pkts);
43065+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43066 dev_kfree_skb_any(skb);
43067 }
43068 break;
43069@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43070
43071 if ((cm_id) && (cm_id->event_handler)) {
43072 if (issue_disconn) {
43073- atomic_inc(&cm_disconnects);
43074+ atomic_inc_unchecked(&cm_disconnects);
43075 cm_event.event = IW_CM_EVENT_DISCONNECT;
43076 cm_event.status = disconn_status;
43077 cm_event.local_addr = cm_id->local_addr;
43078@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43079 }
43080
43081 if (issue_close) {
43082- atomic_inc(&cm_closes);
43083+ atomic_inc_unchecked(&cm_closes);
43084 nes_disconnect(nesqp, 1);
43085
43086 cm_id->provider_data = nesqp;
43087@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43088
43089 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43090 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43091- atomic_inc(&cm_accepts);
43092+ atomic_inc_unchecked(&cm_accepts);
43093
43094 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43095 netdev_refcnt_read(nesvnic->netdev));
43096@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43097 struct nes_cm_core *cm_core;
43098 u8 *start_buff;
43099
43100- atomic_inc(&cm_rejects);
43101+ atomic_inc_unchecked(&cm_rejects);
43102 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43103 loopback = cm_node->loopbackpartner;
43104 cm_core = cm_node->cm_core;
43105@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43106 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43107 ntohs(laddr->sin_port));
43108
43109- atomic_inc(&cm_connects);
43110+ atomic_inc_unchecked(&cm_connects);
43111 nesqp->active_conn = 1;
43112
43113 /* cache the cm_id in the qp */
43114@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43115 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43116 return err;
43117 }
43118- atomic_inc(&cm_listens_created);
43119+ atomic_inc_unchecked(&cm_listens_created);
43120 }
43121
43122 cm_id->add_ref(cm_id);
43123@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43124
43125 if (nesqp->destroyed)
43126 return;
43127- atomic_inc(&cm_connecteds);
43128+ atomic_inc_unchecked(&cm_connecteds);
43129 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43130 " local port 0x%04X. jiffies = %lu.\n",
43131 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43132@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43133
43134 cm_id->add_ref(cm_id);
43135 ret = cm_id->event_handler(cm_id, &cm_event);
43136- atomic_inc(&cm_closes);
43137+ atomic_inc_unchecked(&cm_closes);
43138 cm_event.event = IW_CM_EVENT_CLOSE;
43139 cm_event.status = 0;
43140 cm_event.provider_data = cm_id->provider_data;
43141@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43142 return;
43143 cm_id = cm_node->cm_id;
43144
43145- atomic_inc(&cm_connect_reqs);
43146+ atomic_inc_unchecked(&cm_connect_reqs);
43147 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43148 cm_node, cm_id, jiffies);
43149
43150@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43151 return;
43152 cm_id = cm_node->cm_id;
43153
43154- atomic_inc(&cm_connect_reqs);
43155+ atomic_inc_unchecked(&cm_connect_reqs);
43156 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43157 cm_node, cm_id, jiffies);
43158
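The nes_cm_api conversion near the top of this file swaps positional
initializers for C99 designated initializers, a change this patch repeats
for the gigaset and isdn ops tables below. Naming each member keeps the
table correct if fields are ever reordered or inserted, and it is also
what lets tooling such as the patch's constification machinery reason
about ops structures. A small illustration with hypothetical names:

struct demo_ops {
	int (*open)(void);
	int (*close)(void);
};

static int demo_open(void)  { return 0; }
static int demo_close(void) { return 0; }

/* Positional: silently pairs the wrong function with the wrong slot if
 * the struct layout ever changes. */
static const struct demo_ops fragile_ops = {
	demo_open,
	demo_close,
};

/* Designated: each initializer is tied to its field by name. */
static const struct demo_ops robust_ops = {
	.open	= demo_open,
	.close	= demo_close,
};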
43159diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43160index 4166452..fc952c3 100644
43161--- a/drivers/infiniband/hw/nes/nes_mgt.c
43162+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43163@@ -40,8 +40,8 @@
43164 #include "nes.h"
43165 #include "nes_mgt.h"
43166
43167-atomic_t pau_qps_created;
43168-atomic_t pau_qps_destroyed;
43169+atomic_unchecked_t pau_qps_created;
43170+atomic_unchecked_t pau_qps_destroyed;
43171
43172 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43173 {
43174@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43175 {
43176 struct sk_buff *skb;
43177 unsigned long flags;
43178- atomic_inc(&pau_qps_destroyed);
43179+ atomic_inc_unchecked(&pau_qps_destroyed);
43180
43181 /* Free packets that have not yet been forwarded */
43182 /* Lock is acquired by skb_dequeue when removing the skb */
43183@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43184 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43185 skb_queue_head_init(&nesqp->pau_list);
43186 spin_lock_init(&nesqp->pau_lock);
43187- atomic_inc(&pau_qps_created);
43188+ atomic_inc_unchecked(&pau_qps_created);
43189 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43190 }
43191
43192diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43193index 49eb511..a774366 100644
43194--- a/drivers/infiniband/hw/nes/nes_nic.c
43195+++ b/drivers/infiniband/hw/nes/nes_nic.c
43196@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43197 target_stat_values[++index] = mh_detected;
43198 target_stat_values[++index] = mh_pauses_sent;
43199 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43200- target_stat_values[++index] = atomic_read(&cm_connects);
43201- target_stat_values[++index] = atomic_read(&cm_accepts);
43202- target_stat_values[++index] = atomic_read(&cm_disconnects);
43203- target_stat_values[++index] = atomic_read(&cm_connecteds);
43204- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43205- target_stat_values[++index] = atomic_read(&cm_rejects);
43206- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43207- target_stat_values[++index] = atomic_read(&qps_created);
43208- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43209- target_stat_values[++index] = atomic_read(&qps_destroyed);
43210- target_stat_values[++index] = atomic_read(&cm_closes);
43211+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43212+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43213+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43214+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43215+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43216+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43217+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43218+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43219+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43220+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43221+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43222 target_stat_values[++index] = cm_packets_sent;
43223 target_stat_values[++index] = cm_packets_bounced;
43224 target_stat_values[++index] = cm_packets_created;
43225 target_stat_values[++index] = cm_packets_received;
43226 target_stat_values[++index] = cm_packets_dropped;
43227 target_stat_values[++index] = cm_packets_retrans;
43228- target_stat_values[++index] = atomic_read(&cm_listens_created);
43229- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43230+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43231+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43232 target_stat_values[++index] = cm_backlog_drops;
43233- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43234- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43235- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43236- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43237- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43238+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43239+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43240+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43241+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43242+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43243 target_stat_values[++index] = nesadapter->free_4kpbl;
43244 target_stat_values[++index] = nesadapter->free_256pbl;
43245 target_stat_values[++index] = int_mod_timer_init;
43246 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43247 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43248 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43249- target_stat_values[++index] = atomic_read(&pau_qps_created);
43250- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43251+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43252+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43253 }
43254
43255 /**
43256diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43257index c0d0296..3185f57 100644
43258--- a/drivers/infiniband/hw/nes/nes_verbs.c
43259+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43260@@ -46,9 +46,9 @@
43261
43262 #include <rdma/ib_umem.h>
43263
43264-atomic_t mod_qp_timouts;
43265-atomic_t qps_created;
43266-atomic_t sw_qps_destroyed;
43267+atomic_unchecked_t mod_qp_timouts;
43268+atomic_unchecked_t qps_created;
43269+atomic_unchecked_t sw_qps_destroyed;
43270
43271 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43272
43273@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43274 if (init_attr->create_flags)
43275 return ERR_PTR(-EINVAL);
43276
43277- atomic_inc(&qps_created);
43278+ atomic_inc_unchecked(&qps_created);
43279 switch (init_attr->qp_type) {
43280 case IB_QPT_RC:
43281 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43282@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43283 struct iw_cm_event cm_event;
43284 int ret = 0;
43285
43286- atomic_inc(&sw_qps_destroyed);
43287+ atomic_inc_unchecked(&sw_qps_destroyed);
43288 nesqp->destroyed = 1;
43289
43290 /* Blow away the connection if it exists. */
43291diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43292index b218254..1d1aa3c 100644
43293--- a/drivers/infiniband/hw/qib/qib.h
43294+++ b/drivers/infiniband/hw/qib/qib.h
43295@@ -52,6 +52,7 @@
43296 #include <linux/kref.h>
43297 #include <linux/sched.h>
43298 #include <linux/kthread.h>
43299+#include <linux/slab.h>
43300
43301 #include "qib_common.h"
43302 #include "qib_verbs.h"
43303diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43304index cdc7df4..a2fdfdb 100644
43305--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43306+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43307@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43308 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43309 }
43310
43311-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43312+static struct rtnl_link_ops ipoib_link_ops = {
43313 .kind = "ipoib",
43314 .maxtype = IFLA_IPOIB_MAX,
43315 .policy = ipoib_policy,
43316diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43317index e853a21..56fc5a8 100644
43318--- a/drivers/input/gameport/gameport.c
43319+++ b/drivers/input/gameport/gameport.c
43320@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43321 */
43322 static void gameport_init_port(struct gameport *gameport)
43323 {
43324- static atomic_t gameport_no = ATOMIC_INIT(-1);
43325+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43326
43327 __module_get(THIS_MODULE);
43328
43329 mutex_init(&gameport->drv_mutex);
43330 device_initialize(&gameport->dev);
43331 dev_set_name(&gameport->dev, "gameport%lu",
43332- (unsigned long)atomic_inc_return(&gameport_no));
43333+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43334 gameport->dev.bus = &gameport_bus;
43335 gameport->dev.release = gameport_release_port;
43336 if (gameport->parent)
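gameport_no exists only to mint device names, and the same is true of
input_no, led_seq, device_no, serio_no and serio_raw_no in the hunks that
follow: a wrap after 2^32 registrations is harmless, so they all become
atomic_unchecked_t. The naming idiom itself, sketched with a hypothetical
driver:

#include <linux/atomic.h>
#include <linux/kernel.h>

/* Sketch: a monotonically increasing instance number used purely for
 * naming.  Starting at -1 makes the first atomic_inc_return() yield 0,
 * so devices come out as demo0, demo1, ... */
static atomic_t demo_no = ATOMIC_INIT(-1);

static void demo_name_device(char *buf, size_t len)
{
	snprintf(buf, len, "demo%lu",
		 (unsigned long)atomic_inc_return(&demo_no));
}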
43337diff --git a/drivers/input/input.c b/drivers/input/input.c
43338index 213e3a1..4fea837 100644
43339--- a/drivers/input/input.c
43340+++ b/drivers/input/input.c
43341@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43342 */
43343 struct input_dev *input_allocate_device(void)
43344 {
43345- static atomic_t input_no = ATOMIC_INIT(-1);
43346+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43347 struct input_dev *dev;
43348
43349 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43350@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43351 INIT_LIST_HEAD(&dev->node);
43352
43353 dev_set_name(&dev->dev, "input%lu",
43354- (unsigned long)atomic_inc_return(&input_no));
43355+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43356
43357 __module_get(THIS_MODULE);
43358 }
43359diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43360index 4a95b22..874c182 100644
43361--- a/drivers/input/joystick/sidewinder.c
43362+++ b/drivers/input/joystick/sidewinder.c
43363@@ -30,6 +30,7 @@
43364 #include <linux/kernel.h>
43365 #include <linux/module.h>
43366 #include <linux/slab.h>
43367+#include <linux/sched.h>
43368 #include <linux/input.h>
43369 #include <linux/gameport.h>
43370 #include <linux/jiffies.h>
43371diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43372index 3aa2f3f..53c00ea 100644
43373--- a/drivers/input/joystick/xpad.c
43374+++ b/drivers/input/joystick/xpad.c
43375@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43376
43377 static int xpad_led_probe(struct usb_xpad *xpad)
43378 {
43379- static atomic_t led_seq = ATOMIC_INIT(-1);
43380+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43381 unsigned long led_no;
43382 struct xpad_led *led;
43383 struct led_classdev *led_cdev;
43384@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43385 if (!led)
43386 return -ENOMEM;
43387
43388- led_no = atomic_inc_return(&led_seq);
43389+ led_no = atomic_inc_return_unchecked(&led_seq);
43390
43391 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43392 led->xpad = xpad;
43393diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43394index ac1fa5f..5f7502c 100644
43395--- a/drivers/input/misc/ims-pcu.c
43396+++ b/drivers/input/misc/ims-pcu.c
43397@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43398
43399 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43400 {
43401- static atomic_t device_no = ATOMIC_INIT(-1);
43402+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43403
43404 const struct ims_pcu_device_info *info;
43405 int error;
43406@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43407 }
43408
43409 /* Device appears to be operable, complete initialization */
43410- pcu->device_no = atomic_inc_return(&device_no);
43411+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43412
43413 /*
43414 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43415diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43416index f4cf664..3204fda 100644
43417--- a/drivers/input/mouse/psmouse.h
43418+++ b/drivers/input/mouse/psmouse.h
43419@@ -117,7 +117,7 @@ struct psmouse_attribute {
43420 ssize_t (*set)(struct psmouse *psmouse, void *data,
43421 const char *buf, size_t count);
43422 bool protect;
43423-};
43424+} __do_const;
43425 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43426
43427 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
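__do_const marks struct psmouse_attribute for the patch's constify plugin,
which then treats every instance of the type as const and places it in
read-only memory. The attribute spelling below is an assumption; without
the plugin the marker has to compile away to nothing:

/* Sketch (assumed wiring; the real one ships with the constify plugin). */
#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))
#else
#define __do_const
#endif

struct demo_attribute {
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
} __do_const;	/* all instances become read-only data */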
43428diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43429index b604564..3f14ae4 100644
43430--- a/drivers/input/mousedev.c
43431+++ b/drivers/input/mousedev.c
43432@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43433
43434 spin_unlock_irq(&client->packet_lock);
43435
43436- if (copy_to_user(buffer, data, count))
43437+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43438 return -EFAULT;
43439
43440 return count;
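mousedev_read() now refuses any count larger than its on-stack packet
buffer instead of trusting the earlier clamping logic, turning a latent
stack disclosure into a clean -EFAULT; the b1.c and icn.c hunks below
apply the mirror-image bound to copy_from_user(). The pattern, as a
hypothetical read handler:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t demo_read(char __user *buffer, size_t count)
{
	u8 data[8];	/* illustrative fixed-size packet buffer */

	memset(data, 0, sizeof(data));	/* stand-in for real packet fill */

	/* Re-check the user-controlled length against the local buffer
	 * even if earlier logic should already have clamped it. */
	if (count > sizeof(data) || copy_to_user(buffer, data, count))
		return -EFAULT;

	return count;
}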
43441diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43442index a05a517..323a2fd 100644
43443--- a/drivers/input/serio/serio.c
43444+++ b/drivers/input/serio/serio.c
43445@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43446 */
43447 static void serio_init_port(struct serio *serio)
43448 {
43449- static atomic_t serio_no = ATOMIC_INIT(-1);
43450+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43451
43452 __module_get(THIS_MODULE);
43453
43454@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43455 mutex_init(&serio->drv_mutex);
43456 device_initialize(&serio->dev);
43457 dev_set_name(&serio->dev, "serio%lu",
43458- (unsigned long)atomic_inc_return(&serio_no));
43459+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43460 serio->dev.bus = &serio_bus;
43461 serio->dev.release = serio_release_port;
43462 serio->dev.groups = serio_device_attr_groups;
43463diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43464index 71ef5d6..93380a9 100644
43465--- a/drivers/input/serio/serio_raw.c
43466+++ b/drivers/input/serio/serio_raw.c
43467@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43468
43469 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43470 {
43471- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43472+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43473 struct serio_raw *serio_raw;
43474 int err;
43475
43476@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43477 }
43478
43479 snprintf(serio_raw->name, sizeof(serio_raw->name),
43480- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43481+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43482 kref_init(&serio_raw->kref);
43483 INIT_LIST_HEAD(&serio_raw->client_list);
43484 init_waitqueue_head(&serio_raw->wait);
43485diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43486index 9802485..2e9941d 100644
43487--- a/drivers/iommu/amd_iommu.c
43488+++ b/drivers/iommu/amd_iommu.c
43489@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43490
43491 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43492 {
43493+ phys_addr_t physaddr;
43494 WARN_ON(address & 0x7ULL);
43495
43496 memset(cmd, 0, sizeof(*cmd));
43497- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43498- cmd->data[1] = upper_32_bits(__pa(address));
43499+
43500+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43501+ if (object_starts_on_stack((void *)address)) {
43502+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43503+ physaddr = __pa((u64)adjbuf);
43504+ } else
43505+#endif
43506+ physaddr = __pa(address);
43507+
43508+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43509+ cmd->data[1] = upper_32_bits(physaddr);
43510 cmd->data[2] = 1;
43511 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43512 }
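Under the patch's GRKERNSEC_KSTACKOVERFLOW feature kernel stacks are
vmalloc-backed, so __pa() on a stack object would translate through the
wrong mapping; the hunk above rebases such pointers onto the task's
lowmem alias before converting. The same logic as a hedged helper
(object_starts_on_stack() and current->lowmem_stack come from the patch;
the wrapper name is invented):

#include <linux/sched.h>
#include <asm/io.h>

static phys_addr_t demo_stack_safe_pa(void *obj)
{
#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
	if (object_starts_on_stack(obj)) {
		/* Same offset into the linearly mapped copy of the stack. */
		void *adj = obj - current->stack + current->lowmem_stack;
		return __pa(adj);
	}
#endif
	return __pa(obj);	/* ordinary lowmem object */
}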
43513diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43514index 6cd47b7..264d14a 100644
43515--- a/drivers/iommu/arm-smmu.c
43516+++ b/drivers/iommu/arm-smmu.c
43517@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43518 cfg->irptndx = cfg->cbndx;
43519 }
43520
43521- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43522+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43523 arm_smmu_init_context_bank(smmu_domain);
43524 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43525
43526diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43527index f7718d7..3ef740b 100644
43528--- a/drivers/iommu/iommu.c
43529+++ b/drivers/iommu/iommu.c
43530@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43531 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43532 {
43533 int err;
43534- struct notifier_block *nb;
43535+ notifier_block_no_const *nb;
43536 struct iommu_callback_data cb = {
43537 .ops = ops,
43538 };
43539diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43540index 89c4846..1de796f 100644
43541--- a/drivers/iommu/irq_remapping.c
43542+++ b/drivers/iommu/irq_remapping.c
43543@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43544 void panic_if_irq_remap(const char *msg)
43545 {
43546 if (irq_remapping_enabled)
43547- panic(msg);
43548+ panic("%s", msg);
43549 }
43550
43551 static void ir_ack_apic_edge(struct irq_data *data)
43552@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43553
43554 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43555 {
43556- chip->irq_print_chip = ir_print_prefix;
43557- chip->irq_ack = ir_ack_apic_edge;
43558- chip->irq_eoi = ir_ack_apic_level;
43559- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43560+ pax_open_kernel();
43561+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43562+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43563+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43564+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43565+ pax_close_kernel();
43566 }
43567
43568 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
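irq_remap_modify_chip_defaults() writes function pointers into an irq_chip
that the patch's KERNEXEC/constify machinery keeps read-only, so the
stores are bracketed by pax_open_kernel()/pax_close_kernel(). On x86 the
core trick is briefly clearing CR0.WP, which is exactly what the lguest
switcher assembly later in this patch does by hand. A heavily simplified
sketch of the idea (the real helpers also manage per-CPU state and return
values):

#include <linux/preempt.h>
#include <asm/processor-flags.h>
#include <asm/special_insns.h>

/* Sketch: with CR0.WP clear, ring 0 may write through read-only
 * mappings; keep the window as short as possible and non-preemptible. */
static inline void demo_open_kernel(void)
{
	preempt_disable();
	write_cr0(read_cr0() & ~X86_CR0_WP);
}

static inline void demo_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);
	preempt_enable();
}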
43569diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43570index d617ee5..df8be8b 100644
43571--- a/drivers/irqchip/irq-gic.c
43572+++ b/drivers/irqchip/irq-gic.c
43573@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43574 * Supported arch specific GIC irq extension.
43575 * Default make them NULL.
43576 */
43577-struct irq_chip gic_arch_extn = {
43578+irq_chip_no_const gic_arch_extn = {
43579 .irq_eoi = NULL,
43580 .irq_mask = NULL,
43581 .irq_unmask = NULL,
43582@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43583 chained_irq_exit(chip, desc);
43584 }
43585
43586-static struct irq_chip gic_chip = {
43587+static irq_chip_no_const gic_chip __read_only = {
43588 .name = "GIC",
43589 .irq_mask = gic_mask_irq,
43590 .irq_unmask = gic_unmask_irq,
43591diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43592index 078cac5..fb0f846 100644
43593--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43594+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43595@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43596 struct intc_irqpin_iomem *i;
43597 struct resource *io[INTC_IRQPIN_REG_NR];
43598 struct resource *irq;
43599- struct irq_chip *irq_chip;
43600+ irq_chip_no_const *irq_chip;
43601 void (*enable_fn)(struct irq_data *d);
43602 void (*disable_fn)(struct irq_data *d);
43603 const char *name = dev_name(dev);
43604diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43605index 384e6ed..7a771b2 100644
43606--- a/drivers/irqchip/irq-renesas-irqc.c
43607+++ b/drivers/irqchip/irq-renesas-irqc.c
43608@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43609 struct irqc_priv *p;
43610 struct resource *io;
43611 struct resource *irq;
43612- struct irq_chip *irq_chip;
43613+ irq_chip_no_const *irq_chip;
43614 const char *name = dev_name(&pdev->dev);
43615 int ret;
43616 int k;
43617diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43618index 6a2df32..dc962f1 100644
43619--- a/drivers/isdn/capi/capi.c
43620+++ b/drivers/isdn/capi/capi.c
43621@@ -81,8 +81,8 @@ struct capiminor {
43622
43623 struct capi20_appl *ap;
43624 u32 ncci;
43625- atomic_t datahandle;
43626- atomic_t msgid;
43627+ atomic_unchecked_t datahandle;
43628+ atomic_unchecked_t msgid;
43629
43630 struct tty_port port;
43631 int ttyinstop;
43632@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43633 capimsg_setu16(s, 2, mp->ap->applid);
43634 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43635 capimsg_setu8 (s, 5, CAPI_RESP);
43636- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43637+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43638 capimsg_setu32(s, 8, mp->ncci);
43639 capimsg_setu16(s, 12, datahandle);
43640 }
43641@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43642 mp->outbytes -= len;
43643 spin_unlock_bh(&mp->outlock);
43644
43645- datahandle = atomic_inc_return(&mp->datahandle);
43646+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43647 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43648 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43649 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43650 capimsg_setu16(skb->data, 2, mp->ap->applid);
43651 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43652 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43653- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43654+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43655 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43656 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43657 capimsg_setu16(skb->data, 16, len); /* Data length */
43658diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43659index aecec6d..11e13c5 100644
43660--- a/drivers/isdn/gigaset/bas-gigaset.c
43661+++ b/drivers/isdn/gigaset/bas-gigaset.c
43662@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43663
43664
43665 static const struct gigaset_ops gigops = {
43666- gigaset_write_cmd,
43667- gigaset_write_room,
43668- gigaset_chars_in_buffer,
43669- gigaset_brkchars,
43670- gigaset_init_bchannel,
43671- gigaset_close_bchannel,
43672- gigaset_initbcshw,
43673- gigaset_freebcshw,
43674- gigaset_reinitbcshw,
43675- gigaset_initcshw,
43676- gigaset_freecshw,
43677- gigaset_set_modem_ctrl,
43678- gigaset_baud_rate,
43679- gigaset_set_line_ctrl,
43680- gigaset_isoc_send_skb,
43681- gigaset_isoc_input,
43682+ .write_cmd = gigaset_write_cmd,
43683+ .write_room = gigaset_write_room,
43684+ .chars_in_buffer = gigaset_chars_in_buffer,
43685+ .brkchars = gigaset_brkchars,
43686+ .init_bchannel = gigaset_init_bchannel,
43687+ .close_bchannel = gigaset_close_bchannel,
43688+ .initbcshw = gigaset_initbcshw,
43689+ .freebcshw = gigaset_freebcshw,
43690+ .reinitbcshw = gigaset_reinitbcshw,
43691+ .initcshw = gigaset_initcshw,
43692+ .freecshw = gigaset_freecshw,
43693+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43694+ .baud_rate = gigaset_baud_rate,
43695+ .set_line_ctrl = gigaset_set_line_ctrl,
43696+ .send_skb = gigaset_isoc_send_skb,
43697+ .handle_input = gigaset_isoc_input,
43698 };
43699
43700 /* bas_gigaset_init
43701diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43702index 600c79b..3752bab 100644
43703--- a/drivers/isdn/gigaset/interface.c
43704+++ b/drivers/isdn/gigaset/interface.c
43705@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43706 }
43707 tty->driver_data = cs;
43708
43709- ++cs->port.count;
43710+ atomic_inc(&cs->port.count);
43711
43712- if (cs->port.count == 1) {
43713+ if (atomic_read(&cs->port.count) == 1) {
43714 tty_port_tty_set(&cs->port, tty);
43715 cs->port.low_latency = 1;
43716 }
43717@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43718
43719 if (!cs->connected)
43720 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43721- else if (!cs->port.count)
43722+ else if (!atomic_read(&cs->port.count))
43723 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43724- else if (!--cs->port.count)
43725+ else if (!atomic_dec_return(&cs->port.count))
43726 tty_port_tty_set(&cs->port, NULL);
43727
43728 mutex_unlock(&cs->mutex);
43729diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43730index 8c91fd5..14f13ce 100644
43731--- a/drivers/isdn/gigaset/ser-gigaset.c
43732+++ b/drivers/isdn/gigaset/ser-gigaset.c
43733@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43734 }
43735
43736 static const struct gigaset_ops ops = {
43737- gigaset_write_cmd,
43738- gigaset_write_room,
43739- gigaset_chars_in_buffer,
43740- gigaset_brkchars,
43741- gigaset_init_bchannel,
43742- gigaset_close_bchannel,
43743- gigaset_initbcshw,
43744- gigaset_freebcshw,
43745- gigaset_reinitbcshw,
43746- gigaset_initcshw,
43747- gigaset_freecshw,
43748- gigaset_set_modem_ctrl,
43749- gigaset_baud_rate,
43750- gigaset_set_line_ctrl,
43751- gigaset_m10x_send_skb, /* asyncdata.c */
43752- gigaset_m10x_input, /* asyncdata.c */
43753+ .write_cmd = gigaset_write_cmd,
43754+ .write_room = gigaset_write_room,
43755+ .chars_in_buffer = gigaset_chars_in_buffer,
43756+ .brkchars = gigaset_brkchars,
43757+ .init_bchannel = gigaset_init_bchannel,
43758+ .close_bchannel = gigaset_close_bchannel,
43759+ .initbcshw = gigaset_initbcshw,
43760+ .freebcshw = gigaset_freebcshw,
43761+ .reinitbcshw = gigaset_reinitbcshw,
43762+ .initcshw = gigaset_initcshw,
43763+ .freecshw = gigaset_freecshw,
43764+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43765+ .baud_rate = gigaset_baud_rate,
43766+ .set_line_ctrl = gigaset_set_line_ctrl,
43767+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43768+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43769 };
43770
43771
43772diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43773index 5f306e2..5342f88 100644
43774--- a/drivers/isdn/gigaset/usb-gigaset.c
43775+++ b/drivers/isdn/gigaset/usb-gigaset.c
43776@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43777 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43778 memcpy(cs->hw.usb->bchars, buf, 6);
43779 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43780- 0, 0, &buf, 6, 2000);
43781+ 0, 0, buf, 6, 2000);
43782 }
43783
43784 static void gigaset_freebcshw(struct bc_state *bcs)
43785@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43786 }
43787
43788 static const struct gigaset_ops ops = {
43789- gigaset_write_cmd,
43790- gigaset_write_room,
43791- gigaset_chars_in_buffer,
43792- gigaset_brkchars,
43793- gigaset_init_bchannel,
43794- gigaset_close_bchannel,
43795- gigaset_initbcshw,
43796- gigaset_freebcshw,
43797- gigaset_reinitbcshw,
43798- gigaset_initcshw,
43799- gigaset_freecshw,
43800- gigaset_set_modem_ctrl,
43801- gigaset_baud_rate,
43802- gigaset_set_line_ctrl,
43803- gigaset_m10x_send_skb,
43804- gigaset_m10x_input,
43805+ .write_cmd = gigaset_write_cmd,
43806+ .write_room = gigaset_write_room,
43807+ .chars_in_buffer = gigaset_chars_in_buffer,
43808+ .brkchars = gigaset_brkchars,
43809+ .init_bchannel = gigaset_init_bchannel,
43810+ .close_bchannel = gigaset_close_bchannel,
43811+ .initbcshw = gigaset_initbcshw,
43812+ .freebcshw = gigaset_freebcshw,
43813+ .reinitbcshw = gigaset_reinitbcshw,
43814+ .initcshw = gigaset_initcshw,
43815+ .freecshw = gigaset_freecshw,
43816+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43817+ .baud_rate = gigaset_baud_rate,
43818+ .set_line_ctrl = gigaset_set_line_ctrl,
43819+ .send_skb = gigaset_m10x_send_skb,
43820+ .handle_input = gigaset_m10x_input,
43821 };
43822
43823 /*
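The one-character usb-gigaset change above fixes a real bug rather than
hardening anything: buf is declared as an array parameter, so it decays to
a pointer, and passing &buf handed usb_control_msg() the address of the
pointer variable instead of the six break characters. A standalone
demonstration of the distinction:

#include <stdio.h>

static void send_six(const unsigned char buf[6])
{
	/* Inside this function, buf is really "const unsigned char *". */
	const void *wrong = &buf;	/* address of the pointer itself */
	const void *right = buf;	/* address of the caller's bytes */

	printf("wrong=%p right=%p same=%d\n",
	       wrong, right, wrong == right);
}

int main(void)
{
	unsigned char chars[6] = { 0, 1, 2, 3, 4, 5 };

	send_six(chars);	/* prints same=0: the two differ */
	return 0;
}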
43824diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43825index 4d9b195..455075c 100644
43826--- a/drivers/isdn/hardware/avm/b1.c
43827+++ b/drivers/isdn/hardware/avm/b1.c
43828@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43829 }
43830 if (left) {
43831 if (t4file->user) {
43832- if (copy_from_user(buf, dp, left))
43833+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43834 return -EFAULT;
43835 } else {
43836 memcpy(buf, dp, left);
43837@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43838 }
43839 if (left) {
43840 if (config->user) {
43841- if (copy_from_user(buf, dp, left))
43842+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43843 return -EFAULT;
43844 } else {
43845 memcpy(buf, dp, left);
43846diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43847index 9b856e1..fa03c92 100644
43848--- a/drivers/isdn/i4l/isdn_common.c
43849+++ b/drivers/isdn/i4l/isdn_common.c
43850@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43851 } else
43852 return -EINVAL;
43853 case IIOCDBGVAR:
43854+ if (!capable(CAP_SYS_RAWIO))
43855+ return -EPERM;
43856 if (arg) {
43857 if (copy_to_user(argp, &dev, sizeof(ulong)))
43858 return -EFAULT;
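IIOCDBGVAR hands userspace the kernel address of the driver's dev object,
a ready-made KASLR and heap-layout leak, so the hunk gates it behind
CAP_SYS_RAWIO. The gate in isolation (names hypothetical):

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Sketch: a debug ioctl that discloses a kernel pointer, restricted to
 * CAP_SYS_RAWIO holders as in the hunk above. */
static long demo_dbgvar_ioctl(unsigned long __user *argp)
{
	static int demo_dev;	/* stand-in for the exported object */
	unsigned long addr = (unsigned long)&demo_dev;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_to_user(argp, &addr, sizeof(addr)))
		return -EFAULT;
	return 0;
}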
43859diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43860index 91d5730..336523e 100644
43861--- a/drivers/isdn/i4l/isdn_concap.c
43862+++ b/drivers/isdn/i4l/isdn_concap.c
43863@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43864 }
43865
43866 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43867- &isdn_concap_dl_data_req,
43868- &isdn_concap_dl_connect_req,
43869- &isdn_concap_dl_disconn_req
43870+ .data_req = &isdn_concap_dl_data_req,
43871+ .connect_req = &isdn_concap_dl_connect_req,
43872+ .disconn_req = &isdn_concap_dl_disconn_req
43873 };
43874
43875 /* The following should better go into a dedicated source file such that
43876diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43877index bc91261..2ef7e36 100644
43878--- a/drivers/isdn/i4l/isdn_tty.c
43879+++ b/drivers/isdn/i4l/isdn_tty.c
43880@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43881
43882 #ifdef ISDN_DEBUG_MODEM_OPEN
43883 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43884- port->count);
43885+ atomic_read(&port->count));
43886 #endif
43887- port->count++;
43888+ atomic_inc(&port->count);
43889 port->tty = tty;
43890 /*
43891 * Start up serial port
43892@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43893 #endif
43894 return;
43895 }
43896- if ((tty->count == 1) && (port->count != 1)) {
43897+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43898 /*
43899 * Uh, oh. tty->count is 1, which means that the tty
43900 * structure will be freed. Info->count should always
43901@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43902 * serial port won't be shutdown.
43903 */
43904 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43905- "info->count is %d\n", port->count);
43906- port->count = 1;
43907+ "info->count is %d\n", atomic_read(&port->count));
43908+ atomic_set(&port->count, 1);
43909 }
43910- if (--port->count < 0) {
43911+ if (atomic_dec_return(&port->count) < 0) {
43912 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43913- info->line, port->count);
43914- port->count = 0;
43915+ info->line, atomic_read(&port->count));
43916+ atomic_set(&port->count, 0);
43917 }
43918- if (port->count) {
43919+ if (atomic_read(&port->count)) {
43920 #ifdef ISDN_DEBUG_MODEM_OPEN
43921 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43922 #endif
43923@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43924 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43925 return;
43926 isdn_tty_shutdown(info);
43927- port->count = 0;
43928+ atomic_set(&port->count, 0);
43929 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43930 port->tty = NULL;
43931 wake_up_interruptible(&port->open_wait);
43932@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43933 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43934 modem_info *info = &dev->mdm.info[i];
43935
43936- if (info->port.count == 0)
43937+ if (atomic_read(&info->port.count) == 0)
43938 continue;
43939 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43940 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
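isdn_tty, like gigaset's interface.c earlier, moves its tty_port open
count to atomic operations (the patch converts the field itself to
atomic_t elsewhere), so the open, close and hangup paths can adjust it
without torn or lost updates. The open/close shape, sketched; in the
real drivers a mutex still serializes the inc-then-read pair:

#include <linux/atomic.h>

struct demo_port {
	atomic_t count;
};

static void demo_port_open(struct demo_port *port)
{
	atomic_inc(&port->count);
	if (atomic_read(&port->count) == 1) {
		/* first opener: bring the line up */
	}
}

static void demo_port_close(struct demo_port *port)
{
	if (atomic_dec_return(&port->count) < 0) {
		/* imbalance: clamp and complain, as isdn_tty_close() does */
		atomic_set(&port->count, 0);
	}
}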
43941diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43942index e2d4e58..40cd045 100644
43943--- a/drivers/isdn/i4l/isdn_x25iface.c
43944+++ b/drivers/isdn/i4l/isdn_x25iface.c
43945@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43946
43947
43948 static struct concap_proto_ops ix25_pops = {
43949- &isdn_x25iface_proto_new,
43950- &isdn_x25iface_proto_del,
43951- &isdn_x25iface_proto_restart,
43952- &isdn_x25iface_proto_close,
43953- &isdn_x25iface_xmit,
43954- &isdn_x25iface_receive,
43955- &isdn_x25iface_connect_ind,
43956- &isdn_x25iface_disconn_ind
43957+ .proto_new = &isdn_x25iface_proto_new,
43958+ .proto_del = &isdn_x25iface_proto_del,
43959+ .restart = &isdn_x25iface_proto_restart,
43960+ .close = &isdn_x25iface_proto_close,
43961+ .encap_and_xmit = &isdn_x25iface_xmit,
43962+ .data_ind = &isdn_x25iface_receive,
43963+ .connect_ind = &isdn_x25iface_connect_ind,
43964+ .disconn_ind = &isdn_x25iface_disconn_ind
43965 };
43966
43967 /* error message helper function */
43968diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43969index 6a7447c..b4987ea 100644
43970--- a/drivers/isdn/icn/icn.c
43971+++ b/drivers/isdn/icn/icn.c
43972@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43973 if (count > len)
43974 count = len;
43975 if (user) {
43976- if (copy_from_user(msg, buf, count))
43977+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43978 return -EFAULT;
43979 } else
43980 memcpy(msg, buf, count);
43981@@ -1609,7 +1609,7 @@ icn_setup(char *line)
43982 if (ints[0] > 1)
43983 membase = (unsigned long)ints[2];
43984 if (str && *str) {
43985- strcpy(sid, str);
43986+ strlcpy(sid, str, sizeof(sid));
43987 icn_id = sid;
43988 if ((p = strchr(sid, ','))) {
43989 *p++ = 0;
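Besides the copy_from_user() bound, icn_setup() stops strcpy()ing a
user-controlled boot parameter into the fixed 20-byte sid[] buffer;
strlcpy() bounds the copy and guarantees NUL termination. The minimal
form of the fix:

#include <linux/string.h>

static char sid[20];

/* Sketch: strlcpy() copies at most sizeof(sid) - 1 bytes and always
 * terminates, where the old strcpy() would run straight past the
 * buffer for a long "icn=" parameter. */
static void demo_setup(const char *str)
{
	strlcpy(sid, str, sizeof(sid));
}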
43990diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43991index 87f7dff..7300125 100644
43992--- a/drivers/isdn/mISDN/dsp_cmx.c
43993+++ b/drivers/isdn/mISDN/dsp_cmx.c
43994@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43995 static u16 dsp_count; /* last sample count */
43996 static int dsp_count_valid; /* if we have last sample count */
43997
43998-void
43999+void __intentional_overflow(-1)
44000 dsp_cmx_send(void *arg)
44001 {
44002 struct dsp_conf *conf;
44003diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44004index 0f9ed1e..2715d6f 100644
44005--- a/drivers/leds/leds-clevo-mail.c
44006+++ b/drivers/leds/leds-clevo-mail.c
44007@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44008 * detected as working, but in reality it is not) as low as
44009 * possible.
44010 */
44011-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44012+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44013 {
44014 .callback = clevo_mail_led_dmi_callback,
44015 .ident = "Clevo D410J",
44016diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44017index 046cb70..6b20d39 100644
44018--- a/drivers/leds/leds-ss4200.c
44019+++ b/drivers/leds/leds-ss4200.c
44020@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44021 * detected as working, but in reality it is not) as low as
44022 * possible.
44023 */
44024-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44025+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44026 {
44027 .callback = ss4200_led_dmi_callback,
44028 .ident = "Intel SS4200-E",
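The two DMI match tables above move from __initdata to __initconst. Both
sections are discarded once boot finishes; __initconst is simply the home
for const-qualified init data, which these tables effectively become once
the patch's constification treats dmi_system_id as read-only. Sketched
with a stand-in table:

#include <linux/init.h>

struct demo_dmi_id {
	const char *ident;
};

/* Freed after boot like __initdata, but placed with read-only data. */
static const struct demo_dmi_id demo_whitelist[] __initconst = {
	{ .ident = "Example Box" },
	{ }	/* terminator */
};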
44029diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44030index 6590558..a74c5dd 100644
44031--- a/drivers/lguest/core.c
44032+++ b/drivers/lguest/core.c
44033@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44034 * The end address needs +1 because __get_vm_area allocates an
44035 * extra guard page, so we need space for that.
44036 */
44037+
44038+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44039+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44040+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44041+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44042+#else
44043 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44044 VM_ALLOC, switcher_addr, switcher_addr
44045 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44046+#endif
44047+
44048 if (!switcher_vma) {
44049 err = -ENOMEM;
44050 printk("lguest: could not map switcher pages high\n");
44051@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44052 * Now the Switcher is mapped at the right address, we can't fail!
44053 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44054 */
44055- memcpy(switcher_vma->addr, start_switcher_text,
44056+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44057 end_switcher_text - start_switcher_text);
44058
44059 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44060diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44061index e8b55c3..3514c37 100644
44062--- a/drivers/lguest/page_tables.c
44063+++ b/drivers/lguest/page_tables.c
44064@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44065 /*:*/
44066
44067 #ifdef CONFIG_X86_PAE
44068-static void release_pmd(pmd_t *spmd)
44069+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44070 {
44071 /* If the entry's not present, there's nothing to release. */
44072 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44073diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44074index 922a1ac..9dd0c2a 100644
44075--- a/drivers/lguest/x86/core.c
44076+++ b/drivers/lguest/x86/core.c
44077@@ -59,7 +59,7 @@ static struct {
44078 /* Offset from where switcher.S was compiled to where we've copied it */
44079 static unsigned long switcher_offset(void)
44080 {
44081- return switcher_addr - (unsigned long)start_switcher_text;
44082+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44083 }
44084
44085 /* This cpu's struct lguest_pages (after the Switcher text page) */
44086@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44087 * These copies are pretty cheap, so we do them unconditionally: */
44088 /* Save the current Host top-level page directory.
44089 */
44090+
44091+#ifdef CONFIG_PAX_PER_CPU_PGD
44092+ pages->state.host_cr3 = read_cr3();
44093+#else
44094 pages->state.host_cr3 = __pa(current->mm->pgd);
44095+#endif
44096+
44097 /*
44098 * Set up the Guest's page tables to see this CPU's pages (and no
44099 * other CPU's pages).
44100@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44101 * compiled-in switcher code and the high-mapped copy we just made.
44102 */
44103 for (i = 0; i < IDT_ENTRIES; i++)
44104- default_idt_entries[i] += switcher_offset();
44105+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44106
44107 /*
44108 * Set up the Switcher's per-cpu areas.
44109@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44110 * it will be undisturbed when we switch. To change %cs and jump we
44111 * need this structure to feed to Intel's "lcall" instruction.
44112 */
44113- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44114+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44115 lguest_entry.segment = LGUEST_CS;
44116
44117 /*
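On the copy_in_guest_info() hunk above: with CONFIG_PAX_PER_CPU_PGD every CPU runs on its own kernel page-global directory, so __pa(current->mm->pgd) no longer matches what the hardware will reload on the way back to the host; the live value is taken from CR3 itself. read_cr3() is the usual accessor, essentially:

	static inline unsigned long read_cr3(void)
	{
		unsigned long val;

		asm volatile("mov %%cr3, %0" : "=r" (val));	/* current page-table root */
		return val;
	}
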
44118diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44119index 40634b0..4f5855e 100644
44120--- a/drivers/lguest/x86/switcher_32.S
44121+++ b/drivers/lguest/x86/switcher_32.S
44122@@ -87,6 +87,7 @@
44123 #include <asm/page.h>
44124 #include <asm/segment.h>
44125 #include <asm/lguest.h>
44126+#include <asm/processor-flags.h>
44127
44128 // We mark the start of the code to copy
44129 // It's placed in .text tho it's never run here
44130@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44131 // Changes type when we load it: damn Intel!
44132 // For after we switch over our page tables
44133 // That entry will be read-only: we'd crash.
44134+
44135+#ifdef CONFIG_PAX_KERNEXEC
44136+ mov %cr0, %edx
44137+ xor $X86_CR0_WP, %edx
44138+ mov %edx, %cr0
44139+#endif
44140+
44141 movl $(GDT_ENTRY_TSS*8), %edx
44142 ltr %dx
44143
44144@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44145 // Let's clear it again for our return.
44146 // The GDT descriptor of the Host
44147 // Points to the table after two "size" bytes
44148- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44149+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44150 // Clear "used" from type field (byte 5, bit 2)
44151- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44152+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44153+
44154+#ifdef CONFIG_PAX_KERNEXEC
44155+ mov %cr0, %eax
44156+ xor $X86_CR0_WP, %eax
44157+ mov %eax, %cr0
44158+#endif
44159
44160 // Once our page table's switched, the Guest is live!
44161 // The Host fades as we run this final step.
44162@@ -295,13 +309,12 @@ deliver_to_host:
44163 // I consulted gcc, and it gave
44164 // These instructions, which I gladly credit:
44165 leal (%edx,%ebx,8), %eax
44166- movzwl (%eax),%edx
44167- movl 4(%eax), %eax
44168- xorw %ax, %ax
44169- orl %eax, %edx
44170+ movl 4(%eax), %edx
44171+ movw (%eax), %dx
44172 // Now the address of the handler's in %edx
44173 // We call it now: its "iret" drops us home.
44174- jmp *%edx
44175+ ljmp $__KERNEL_CS, $1f
44176+1: jmp *%edx
44177
44178 // Every interrupt can come to us here
44179 // But we must truly tell each apart.
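Two KERNEXEC details in the switcher hunks above. First, the GDT is read-only under KERNEXEC, so CR0.WP is xor-toggled off around the andb that clears the TSS descriptor's busy bit, then toggled back on; that is also why the hunk adds the asm/processor-flags.h include, for X86_CR0_WP. The same open/close idiom appears throughout the patch; a simplified C rendering (PaX's real helpers also disable preemption and sanity-check the flag):

	static inline unsigned long pax_open_kernel(void)
	{
		unsigned long cr0 = read_cr0() ^ X86_CR0_WP;

		write_cr0(cr0);		/* WP clear: ring 0 may write RO pages */
		return cr0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		unsigned long cr0 = read_cr0() ^ X86_CR0_WP;

		write_cr0(cr0);		/* WP set again */
		return cr0;
	}

Second, the indirect "jmp *%edx" into the host's interrupt handler is preceded by an ljmp through $__KERNEL_CS, reloading %cs with the full kernel code segment first: under KERNEXEC's segment-based protection on i386 the code segment in force here is not the flat one, and the handler address must be interpreted against the right segment before the indirect jump.
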
44180diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44181index a08e3ee..df8ade2 100644
44182--- a/drivers/md/bcache/closure.h
44183+++ b/drivers/md/bcache/closure.h
44184@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44185 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44186 struct workqueue_struct *wq)
44187 {
44188- BUG_ON(object_is_on_stack(cl));
44189+ BUG_ON(object_starts_on_stack(cl));
44190 closure_set_ip(cl);
44191 cl->fn = fn;
44192 cl->wq = wq;
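The rename from object_is_on_stack() to object_starts_on_stack() is patch-wide and purely clarifying: only the object's first byte is tested against the task stack, which is exactly what this BUG_ON needs. The helper is essentially:

	static inline int object_starts_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return (obj >= stack) && (obj < (stack + THREAD_SIZE));
	}
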
44193diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44194index 1695ee5..89f18ab 100644
44195--- a/drivers/md/bitmap.c
44196+++ b/drivers/md/bitmap.c
44197@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44198 chunk_kb ? "KB" : "B");
44199 if (bitmap->storage.file) {
44200 seq_printf(seq, ", file: ");
44201- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44202+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44203 }
44204
44205 seq_printf(seq, "\n");
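The third argument of seq_path() lists the characters to octal-escape in the emitted path; adding '\\' means a bitmap file whose name contains a backslash can no longer plant what looks like an escape sequence in this seq_file output and confuse parsers:

	seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");	/* escape SP, TAB, NL and '\' */
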
44206diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44207index 73f791b..8c5d3ac 100644
44208--- a/drivers/md/dm-ioctl.c
44209+++ b/drivers/md/dm-ioctl.c
44210@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44211 cmd == DM_LIST_VERSIONS_CMD)
44212 return 0;
44213
44214- if ((cmd == DM_DEV_CREATE_CMD)) {
44215+ if (cmd == DM_DEV_CREATE_CMD) {
44216 if (!*param->name) {
44217 DMWARN("name not supplied when creating device");
44218 return -EINVAL;
44219diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44220index 089d627..ef7352e 100644
44221--- a/drivers/md/dm-raid1.c
44222+++ b/drivers/md/dm-raid1.c
44223@@ -40,7 +40,7 @@ enum dm_raid1_error {
44224
44225 struct mirror {
44226 struct mirror_set *ms;
44227- atomic_t error_count;
44228+ atomic_unchecked_t error_count;
44229 unsigned long error_type;
44230 struct dm_dev *dev;
44231 sector_t offset;
44232@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44233 struct mirror *m;
44234
44235 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44236- if (!atomic_read(&m->error_count))
44237+ if (!atomic_read_unchecked(&m->error_count))
44238 return m;
44239
44240 return NULL;
44241@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44242 * simple way to tell if a device has encountered
44243 * errors.
44244 */
44245- atomic_inc(&m->error_count);
44246+ atomic_inc_unchecked(&m->error_count);
44247
44248 if (test_and_set_bit(error_type, &m->error_type))
44249 return;
44250@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44251 struct mirror *m = get_default_mirror(ms);
44252
44253 do {
44254- if (likely(!atomic_read(&m->error_count)))
44255+ if (likely(!atomic_read_unchecked(&m->error_count)))
44256 return m;
44257
44258 if (m-- == ms->mirror)
44259@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44260 {
44261 struct mirror *default_mirror = get_default_mirror(m->ms);
44262
44263- return !atomic_read(&default_mirror->error_count);
44264+ return !atomic_read_unchecked(&default_mirror->error_count);
44265 }
44266
44267 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44268@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44269 */
44270 if (likely(region_in_sync(ms, region, 1)))
44271 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44272- else if (m && atomic_read(&m->error_count))
44273+ else if (m && atomic_read_unchecked(&m->error_count))
44274 m = NULL;
44275
44276 if (likely(m))
44277@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44278 }
44279
44280 ms->mirror[mirror].ms = ms;
44281- atomic_set(&(ms->mirror[mirror].error_count), 0);
44282+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44283 ms->mirror[mirror].error_type = 0;
44284 ms->mirror[mirror].offset = offset;
44285
44286@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44287 */
44288 static char device_status_char(struct mirror *m)
44289 {
44290- if (!atomic_read(&(m->error_count)))
44291+ if (!atomic_read_unchecked(&(m->error_count)))
44292 return 'A';
44293
44294 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
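All the atomic_t conversions in this dm-raid1 hunk (and in dm-stripe, dm.c, md.c and raid1/raid10/raid5 below) follow one rule of PAX_REFCOUNT: counters that are mere statistics, not object reference counts, become atomic_unchecked_t, so the overflow-trapping instrumentation stays on real refcounts and a wrapping statistic cannot trigger a false positive. The unchecked type mirrors atomic_t minus the trap logic; simplified from the patch's x86 implementation:

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return (*(const volatile int *)&v->counter);	/* plain once-read */
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));	/* no overflow trap */
	}
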
44295diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44296index f478a4c..4b8e5ef 100644
44297--- a/drivers/md/dm-stats.c
44298+++ b/drivers/md/dm-stats.c
44299@@ -382,7 +382,7 @@ do_sync_free:
44300 synchronize_rcu_expedited();
44301 dm_stat_free(&s->rcu_head);
44302 } else {
44303- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44304+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44305 call_rcu(&s->rcu_head, dm_stat_free);
44306 }
44307 return 0;
44308@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44309 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44310 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44311 ));
44312- ACCESS_ONCE(last->last_sector) = end_sector;
44313- ACCESS_ONCE(last->last_rw) = bi_rw;
44314+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44315+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44316 }
44317
44318 rcu_read_lock();
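ACCESS_ONCE_RW() exists because this patch redefines ACCESS_ONCE() to dereference through a const-qualified volatile pointer, letting the compiler (and the constify plugin) prove that a bare ACCESS_ONCE never stores; writes must opt in via the _RW form. Paraphrasing the patched compiler.h:

	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* loads only */
	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* loads and stores */
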
44319diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44320index f8b37d4..5c5cafd 100644
44321--- a/drivers/md/dm-stripe.c
44322+++ b/drivers/md/dm-stripe.c
44323@@ -21,7 +21,7 @@ struct stripe {
44324 struct dm_dev *dev;
44325 sector_t physical_start;
44326
44327- atomic_t error_count;
44328+ atomic_unchecked_t error_count;
44329 };
44330
44331 struct stripe_c {
44332@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44333 kfree(sc);
44334 return r;
44335 }
44336- atomic_set(&(sc->stripe[i].error_count), 0);
44337+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44338 }
44339
44340 ti->private = sc;
44341@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44342 DMEMIT("%d ", sc->stripes);
44343 for (i = 0; i < sc->stripes; i++) {
44344 DMEMIT("%s ", sc->stripe[i].dev->name);
44345- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44346+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44347 'D' : 'A';
44348 }
44349 buffer[i] = '\0';
44350@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44351 */
44352 for (i = 0; i < sc->stripes; i++)
44353 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44354- atomic_inc(&(sc->stripe[i].error_count));
44355- if (atomic_read(&(sc->stripe[i].error_count)) <
44356+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44357+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44358 DM_IO_ERROR_THRESHOLD)
44359 schedule_work(&sc->trigger_event);
44360 }
44361diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44362index 3afae9e..4e1c954 100644
44363--- a/drivers/md/dm-table.c
44364+++ b/drivers/md/dm-table.c
44365@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44366 if (!dev_size)
44367 return 0;
44368
44369- if ((start >= dev_size) || (start + len > dev_size)) {
44370+ if ((start >= dev_size) || (len > dev_size - start)) {
44371 DMWARN("%s: %s too small for target: "
44372 "start=%llu, len=%llu, dev_size=%llu",
44373 dm_device_name(ti->table->md), bdevname(bdev, b),
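The rewritten dm-table bound check is the standard overflow-safe form: once start >= dev_size has been ruled out, dev_size - start cannot underflow, whereas the old start + len > dev_size could wrap around sector_t for a huge len and slip past validation. As a reusable idiom:

	/* true iff [start, start + len) fits inside [0, size), with no wraparound */
	static bool range_fits(u64 start, u64 len, u64 size)
	{
		return start < size && len <= size - start;
	}
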
44374diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44375index 43adbb8..7b34305 100644
44376--- a/drivers/md/dm-thin-metadata.c
44377+++ b/drivers/md/dm-thin-metadata.c
44378@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44379 {
44380 pmd->info.tm = pmd->tm;
44381 pmd->info.levels = 2;
44382- pmd->info.value_type.context = pmd->data_sm;
44383+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44384 pmd->info.value_type.size = sizeof(__le64);
44385 pmd->info.value_type.inc = data_block_inc;
44386 pmd->info.value_type.dec = data_block_dec;
44387@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44388
44389 pmd->bl_info.tm = pmd->tm;
44390 pmd->bl_info.levels = 1;
44391- pmd->bl_info.value_type.context = pmd->data_sm;
44392+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44393 pmd->bl_info.value_type.size = sizeof(__le64);
44394 pmd->bl_info.value_type.inc = data_block_inc;
44395 pmd->bl_info.value_type.dec = data_block_dec;
44396diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44397index 64b10e0..07db8f4 100644
44398--- a/drivers/md/dm.c
44399+++ b/drivers/md/dm.c
44400@@ -185,9 +185,9 @@ struct mapped_device {
44401 /*
44402 * Event handling.
44403 */
44404- atomic_t event_nr;
44405+ atomic_unchecked_t event_nr;
44406 wait_queue_head_t eventq;
44407- atomic_t uevent_seq;
44408+ atomic_unchecked_t uevent_seq;
44409 struct list_head uevent_list;
44410 spinlock_t uevent_lock; /* Protect access to uevent_list */
44411
44412@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44413 spin_lock_init(&md->deferred_lock);
44414 atomic_set(&md->holders, 1);
44415 atomic_set(&md->open_count, 0);
44416- atomic_set(&md->event_nr, 0);
44417- atomic_set(&md->uevent_seq, 0);
44418+ atomic_set_unchecked(&md->event_nr, 0);
44419+ atomic_set_unchecked(&md->uevent_seq, 0);
44420 INIT_LIST_HEAD(&md->uevent_list);
44421 INIT_LIST_HEAD(&md->table_devices);
44422 spin_lock_init(&md->uevent_lock);
44423@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44424
44425 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44426
44427- atomic_inc(&md->event_nr);
44428+ atomic_inc_unchecked(&md->event_nr);
44429 wake_up(&md->eventq);
44430 }
44431
44432@@ -3034,18 +3034,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44433
44434 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44435 {
44436- return atomic_add_return(1, &md->uevent_seq);
44437+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44438 }
44439
44440 uint32_t dm_get_event_nr(struct mapped_device *md)
44441 {
44442- return atomic_read(&md->event_nr);
44443+ return atomic_read_unchecked(&md->event_nr);
44444 }
44445
44446 int dm_wait_event(struct mapped_device *md, int event_nr)
44447 {
44448 return wait_event_interruptible(md->eventq,
44449- (event_nr != atomic_read(&md->event_nr)));
44450+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44451 }
44452
44453 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44454diff --git a/drivers/md/md.c b/drivers/md/md.c
44455index 709755f..5bc3fa4 100644
44456--- a/drivers/md/md.c
44457+++ b/drivers/md/md.c
44458@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44459 * start build, activate spare
44460 */
44461 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44462-static atomic_t md_event_count;
44463+static atomic_unchecked_t md_event_count;
44464 void md_new_event(struct mddev *mddev)
44465 {
44466- atomic_inc(&md_event_count);
44467+ atomic_inc_unchecked(&md_event_count);
44468 wake_up(&md_event_waiters);
44469 }
44470 EXPORT_SYMBOL_GPL(md_new_event);
44471@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44472 */
44473 static void md_new_event_inintr(struct mddev *mddev)
44474 {
44475- atomic_inc(&md_event_count);
44476+ atomic_inc_unchecked(&md_event_count);
44477 wake_up(&md_event_waiters);
44478 }
44479
44480@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44481 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44482 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44483 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44484- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44485+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44486
44487 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44488 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44489@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44490 else
44491 sb->resync_offset = cpu_to_le64(0);
44492
44493- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44494+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44495
44496 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44497 sb->size = cpu_to_le64(mddev->dev_sectors);
44498@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44499 static ssize_t
44500 errors_show(struct md_rdev *rdev, char *page)
44501 {
44502- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44503+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44504 }
44505
44506 static ssize_t
44507@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44508 char *e;
44509 unsigned long n = simple_strtoul(buf, &e, 10);
44510 if (*buf && (*e == 0 || *e == '\n')) {
44511- atomic_set(&rdev->corrected_errors, n);
44512+ atomic_set_unchecked(&rdev->corrected_errors, n);
44513 return len;
44514 }
44515 return -EINVAL;
44516@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44517 rdev->sb_loaded = 0;
44518 rdev->bb_page = NULL;
44519 atomic_set(&rdev->nr_pending, 0);
44520- atomic_set(&rdev->read_errors, 0);
44521- atomic_set(&rdev->corrected_errors, 0);
44522+ atomic_set_unchecked(&rdev->read_errors, 0);
44523+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44524
44525 INIT_LIST_HEAD(&rdev->same_set);
44526 init_waitqueue_head(&rdev->blocked_wait);
44527@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44528
44529 spin_unlock(&pers_lock);
44530 seq_printf(seq, "\n");
44531- seq->poll_event = atomic_read(&md_event_count);
44532+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44533 return 0;
44534 }
44535 if (v == (void*)2) {
44536@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44537 return error;
44538
44539 seq = file->private_data;
44540- seq->poll_event = atomic_read(&md_event_count);
44541+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44542 return error;
44543 }
44544
44545@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44546 /* always allow read */
44547 mask = POLLIN | POLLRDNORM;
44548
44549- if (seq->poll_event != atomic_read(&md_event_count))
44550+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44551 mask |= POLLERR | POLLPRI;
44552 return mask;
44553 }
44554@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44555 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44556 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44557 (int)part_stat_read(&disk->part0, sectors[1]) -
44558- atomic_read(&disk->sync_io);
44559+ atomic_read_unchecked(&disk->sync_io);
44560 /* sync IO will cause sync_io to increase before the disk_stats
44561 * as sync_io is counted when a request starts, and
44562 * disk_stats is counted when it completes.
44563diff --git a/drivers/md/md.h b/drivers/md/md.h
44564index 03cec5b..0a658c1 100644
44565--- a/drivers/md/md.h
44566+++ b/drivers/md/md.h
44567@@ -94,13 +94,13 @@ struct md_rdev {
44568 * only maintained for arrays that
44569 * support hot removal
44570 */
44571- atomic_t read_errors; /* number of consecutive read errors that
44572+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44573 * we have tried to ignore.
44574 */
44575 struct timespec last_read_error; /* monotonic time since our
44576 * last read error
44577 */
44578- atomic_t corrected_errors; /* number of corrected read errors,
44579+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44580 * for reporting to userspace and storing
44581 * in superblock.
44582 */
44583@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44584
44585 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44586 {
44587- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44588+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44589 }
44590
44591 struct md_personality
44592diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44593index e8a9042..35bd145 100644
44594--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44595+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44596@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44597 * Flick into a mode where all blocks get allocated in the new area.
44598 */
44599 smm->begin = old_len;
44600- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44601+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44602
44603 /*
44604 * Extend.
44605@@ -714,7 +714,7 @@ out:
44606 /*
44607 * Switch back to normal behaviour.
44608 */
44609- memcpy(sm, &ops, sizeof(*sm));
44610+ memcpy((void *)sm, &ops, sizeof(*sm));
44611 return r;
44612 }
44613
44614diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44615index 3e6d115..ffecdeb 100644
44616--- a/drivers/md/persistent-data/dm-space-map.h
44617+++ b/drivers/md/persistent-data/dm-space-map.h
44618@@ -71,6 +71,7 @@ struct dm_space_map {
44619 dm_sm_threshold_fn fn,
44620 void *context);
44621 };
44622+typedef struct dm_space_map __no_const dm_space_map_no_const;
44623
44624 /*----------------------------------------------------------------*/
44625
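The dm_space_map_no_const typedef exists because the constify gcc plugin force-qualifies structures consisting essentially of function pointers as const, yet sm_metadata_extend() above legitimately rewrites a live dm_space_map with memcpy() to flip between bootstrap and normal ops; the matching (void *)sm casts strip the const the plugin attaches to the destination. __no_const opts a declaration out of that treatment; generic shape, names hypothetical:

	struct hotswap_ops {
		int (*start)(void *ctx);
		int (*stop)(void *ctx);
	};
	/* this instance is rewritten at runtime, so keep it writable */
	typedef struct hotswap_ops __no_const hotswap_ops_no_const;
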
44626diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44627index 2f2f38f..f6a8ebe 100644
44628--- a/drivers/md/raid1.c
44629+++ b/drivers/md/raid1.c
44630@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44631 if (r1_sync_page_io(rdev, sect, s,
44632 bio->bi_io_vec[idx].bv_page,
44633 READ) != 0)
44634- atomic_add(s, &rdev->corrected_errors);
44635+ atomic_add_unchecked(s, &rdev->corrected_errors);
44636 }
44637 sectors -= s;
44638 sect += s;
44639@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44640 !test_bit(Faulty, &rdev->flags)) {
44641 if (r1_sync_page_io(rdev, sect, s,
44642 conf->tmppage, READ)) {
44643- atomic_add(s, &rdev->corrected_errors);
44644+ atomic_add_unchecked(s, &rdev->corrected_errors);
44645 printk(KERN_INFO
44646 "md/raid1:%s: read error corrected "
44647 "(%d sectors at %llu on %s)\n",
44648diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44649index 32e282f..5cec803 100644
44650--- a/drivers/md/raid10.c
44651+++ b/drivers/md/raid10.c
44652@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44653 /* The write handler will notice the lack of
44654 * R10BIO_Uptodate and record any errors etc
44655 */
44656- atomic_add(r10_bio->sectors,
44657+ atomic_add_unchecked(r10_bio->sectors,
44658 &conf->mirrors[d].rdev->corrected_errors);
44659
44660 /* for reconstruct, we always reschedule after a read.
44661@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44662 {
44663 struct timespec cur_time_mon;
44664 unsigned long hours_since_last;
44665- unsigned int read_errors = atomic_read(&rdev->read_errors);
44666+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44667
44668 ktime_get_ts(&cur_time_mon);
44669
44670@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44671 * overflowing the shift of read_errors by hours_since_last.
44672 */
44673 if (hours_since_last >= 8 * sizeof(read_errors))
44674- atomic_set(&rdev->read_errors, 0);
44675+ atomic_set_unchecked(&rdev->read_errors, 0);
44676 else
44677- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44678+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44679 }
44680
44681 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44682@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44683 return;
44684
44685 check_decay_read_errors(mddev, rdev);
44686- atomic_inc(&rdev->read_errors);
44687- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44688+ atomic_inc_unchecked(&rdev->read_errors);
44689+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44690 char b[BDEVNAME_SIZE];
44691 bdevname(rdev->bdev, b);
44692
44693@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44694 "md/raid10:%s: %s: Raid device exceeded "
44695 "read_error threshold [cur %d:max %d]\n",
44696 mdname(mddev), b,
44697- atomic_read(&rdev->read_errors), max_read_errors);
44698+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44699 printk(KERN_NOTICE
44700 "md/raid10:%s: %s: Failing raid device\n",
44701 mdname(mddev), b);
44702@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44703 sect +
44704 choose_data_offset(r10_bio, rdev)),
44705 bdevname(rdev->bdev, b));
44706- atomic_add(s, &rdev->corrected_errors);
44707+ atomic_add_unchecked(s, &rdev->corrected_errors);
44708 }
44709
44710 rdev_dec_pending(rdev, mddev);
44711diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44712index 8577cc7..e80e05d 100644
44713--- a/drivers/md/raid5.c
44714+++ b/drivers/md/raid5.c
44715@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44716 return 1;
44717 }
44718
44719+#ifdef CONFIG_GRKERNSEC_HIDESYM
44720+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44721+#endif
44722+
44723 static int grow_stripes(struct r5conf *conf, int num)
44724 {
44725 struct kmem_cache *sc;
44726@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44727 "raid%d-%s", conf->level, mdname(conf->mddev));
44728 else
44729 sprintf(conf->cache_name[0],
44730+#ifdef CONFIG_GRKERNSEC_HIDESYM
44731+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44732+#else
44733 "raid%d-%p", conf->level, conf->mddev);
44734+#endif
44735 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44736
44737 conf->active_name = 0;
44738@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44739 mdname(conf->mddev), STRIPE_SECTORS,
44740 (unsigned long long)s,
44741 bdevname(rdev->bdev, b));
44742- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44743+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44744 clear_bit(R5_ReadError, &sh->dev[i].flags);
44745 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44746 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44747 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44748
44749- if (atomic_read(&rdev->read_errors))
44750- atomic_set(&rdev->read_errors, 0);
44751+ if (atomic_read_unchecked(&rdev->read_errors))
44752+ atomic_set_unchecked(&rdev->read_errors, 0);
44753 } else {
44754 const char *bdn = bdevname(rdev->bdev, b);
44755 int retry = 0;
44756 int set_bad = 0;
44757
44758 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44759- atomic_inc(&rdev->read_errors);
44760+ atomic_inc_unchecked(&rdev->read_errors);
44761 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44762 printk_ratelimited(
44763 KERN_WARNING
44764@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44765 mdname(conf->mddev),
44766 (unsigned long long)s,
44767 bdn);
44768- } else if (atomic_read(&rdev->read_errors)
44769+ } else if (atomic_read_unchecked(&rdev->read_errors)
44770 > conf->max_nr_stripes)
44771 printk(KERN_WARNING
44772 "md/raid:%s: Too many read errors, failing device %s.\n",
44773diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44774index 983db75..ef9248c 100644
44775--- a/drivers/media/dvb-core/dvbdev.c
44776+++ b/drivers/media/dvb-core/dvbdev.c
44777@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44778 const struct dvb_device *template, void *priv, int type)
44779 {
44780 struct dvb_device *dvbdev;
44781- struct file_operations *dvbdevfops;
44782+ file_operations_no_const *dvbdevfops;
44783 struct device *clsdev;
44784 int minor;
44785 int id;
44786diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44787index 6ad22b6..6e90e2a 100644
44788--- a/drivers/media/dvb-frontends/af9033.h
44789+++ b/drivers/media/dvb-frontends/af9033.h
44790@@ -96,6 +96,6 @@ struct af9033_ops {
44791 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44792 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44793 int onoff);
44794-};
44795+} __no_const;
44796
44797 #endif /* AF9033_H */
44798diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44799index 9b6c3bb..baeb5c7 100644
44800--- a/drivers/media/dvb-frontends/dib3000.h
44801+++ b/drivers/media/dvb-frontends/dib3000.h
44802@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44803 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44804 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44805 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44806-};
44807+} __no_const;
44808
44809 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44810 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44811diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44812index 1fea0e9..321ce8f 100644
44813--- a/drivers/media/dvb-frontends/dib7000p.h
44814+++ b/drivers/media/dvb-frontends/dib7000p.h
44815@@ -64,7 +64,7 @@ struct dib7000p_ops {
44816 int (*get_adc_power)(struct dvb_frontend *fe);
44817 int (*slave_reset)(struct dvb_frontend *fe);
44818 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44819-};
44820+} __no_const;
44821
44822 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44823 void *dib7000p_attach(struct dib7000p_ops *ops);
44824diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44825index 84cc103..5780c54 100644
44826--- a/drivers/media/dvb-frontends/dib8000.h
44827+++ b/drivers/media/dvb-frontends/dib8000.h
44828@@ -61,7 +61,7 @@ struct dib8000_ops {
44829 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44830 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44831 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44832-};
44833+} __no_const;
44834
44835 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44836 void *dib8000_attach(struct dib8000_ops *ops);
44837diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44838index 860c98fc..497fa25 100644
44839--- a/drivers/media/pci/cx88/cx88-video.c
44840+++ b/drivers/media/pci/cx88/cx88-video.c
44841@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44842
44843 /* ------------------------------------------------------------------ */
44844
44845-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44846-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44847-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44848+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44849+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44850+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44851
44852 module_param_array(video_nr, int, NULL, 0444);
44853 module_param_array(vbi_nr, int, NULL, 0444);
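The cx88 device-number arrays are registered with module_param_array(..., int, ...), which reads and writes the backing storage as plain int; declaring that storage unsigned int while initializing it with UNSET (-1) is a signedness mismatch that the param type-check machinery flags. Making the arrays int (the same fix hits wl128x's radio_nr further down) keeps the declaration and the declared param type in agreement:

	static int video_nr[] = { [0 ... (CX88_MAXBOARDS - 1)] = UNSET };
	module_param_array(video_nr, int, NULL, 0444);	/* storage type now matches "int" */
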
44854diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44855index 802642d..5534900 100644
44856--- a/drivers/media/pci/ivtv/ivtv-driver.c
44857+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44858@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44859 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44860
44861 /* ivtv instance counter */
44862-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44863+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44864
44865 /* Parameter declarations */
44866 static int cardtype[IVTV_MAX_CARDS];
44867diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44868index 8cbe6b4..ea3601c 100644
44869--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44870+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44871@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44872
44873 static int solo_sysfs_init(struct solo_dev *solo_dev)
44874 {
44875- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44876+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44877 struct device *dev = &solo_dev->dev;
44878 const char *driver;
44879 int i;
44880diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44881index c7141f2..5301fec 100644
44882--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44883+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44884@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44885
44886 int solo_g723_init(struct solo_dev *solo_dev)
44887 {
44888- static struct snd_device_ops ops = { NULL };
44889+ static struct snd_device_ops ops = { };
44890 struct snd_card *card;
44891 struct snd_kcontrol_new kctl;
44892 char name[32];
44893diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44894index 8c84846..27b4f83 100644
44895--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44896+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44897@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44898
44899 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44900 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44901- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44902+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44903 if (p2m_id < 0)
44904 p2m_id = -p2m_id;
44905 }
44906diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44907index bd8edfa..e82ed85 100644
44908--- a/drivers/media/pci/solo6x10/solo6x10.h
44909+++ b/drivers/media/pci/solo6x10/solo6x10.h
44910@@ -220,7 +220,7 @@ struct solo_dev {
44911
44912 /* P2M DMA Engine */
44913 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44914- atomic_t p2m_count;
44915+ atomic_unchecked_t p2m_count;
44916 int p2m_jiffies;
44917 unsigned int p2m_timeouts;
44918
44919diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44920index c135165..dc69499 100644
44921--- a/drivers/media/pci/tw68/tw68-core.c
44922+++ b/drivers/media/pci/tw68/tw68-core.c
44923@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
44924 module_param_array(card, int, NULL, 0444);
44925 MODULE_PARM_DESC(card, "card type");
44926
44927-static atomic_t tw68_instance = ATOMIC_INIT(0);
44928+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
44929
44930 /* ------------------------------------------------------------------ */
44931
44932diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44933index ba2d8f9..1566684 100644
44934--- a/drivers/media/platform/omap/omap_vout.c
44935+++ b/drivers/media/platform/omap/omap_vout.c
44936@@ -63,7 +63,6 @@ enum omap_vout_channels {
44937 OMAP_VIDEO2,
44938 };
44939
44940-static struct videobuf_queue_ops video_vbq_ops;
44941 /* Variables configurable through module params*/
44942 static u32 video1_numbuffers = 3;
44943 static u32 video2_numbuffers = 3;
44944@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
44945 {
44946 struct videobuf_queue *q;
44947 struct omap_vout_device *vout = NULL;
44948+ static struct videobuf_queue_ops video_vbq_ops = {
44949+ .buf_setup = omap_vout_buffer_setup,
44950+ .buf_prepare = omap_vout_buffer_prepare,
44951+ .buf_release = omap_vout_buffer_release,
44952+ .buf_queue = omap_vout_buffer_queue,
44953+ };
44954
44955 vout = video_drvdata(file);
44956 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44957@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
44958 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44959
44960 q = &vout->vbq;
44961- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44962- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44963- video_vbq_ops.buf_release = omap_vout_buffer_release;
44964- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44965 spin_lock_init(&vout->vbq_lock);
44966
44967 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
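Moving video_vbq_ops into omap_vout_open() as a statically initialized local removes a writable file-scope function-pointer table that was re-filled on every open(), a pattern both racy and at odds with constification. The general shape, names hypothetical (the patch leaves the struct non-const and lets the constify plugin handle it):

	static int example_open(struct file *file)
	{
		static struct videobuf_queue_ops vbq_ops = {
			.buf_setup   = example_buffer_setup,
			.buf_prepare = example_buffer_prepare,
			.buf_release = example_buffer_release,
			.buf_queue   = example_buffer_queue,
		};

		/* ... hand &vbq_ops to videobuf_queue_dma_contig_init() as before ... */
		return 0;
	}
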
44968diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44969index fb2acc5..a2fcbdc4 100644
44970--- a/drivers/media/platform/s5p-tv/mixer.h
44971+++ b/drivers/media/platform/s5p-tv/mixer.h
44972@@ -156,7 +156,7 @@ struct mxr_layer {
44973 /** layer index (unique identifier) */
44974 int idx;
44975 /** callbacks for layer methods */
44976- struct mxr_layer_ops ops;
44977+ struct mxr_layer_ops *ops;
44978 /** format array */
44979 const struct mxr_format **fmt_array;
44980 /** size of format array */
44981diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44982index 74344c7..a39e70e 100644
44983--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44984+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44985@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44986 {
44987 struct mxr_layer *layer;
44988 int ret;
44989- struct mxr_layer_ops ops = {
44990+ static struct mxr_layer_ops ops = {
44991 .release = mxr_graph_layer_release,
44992 .buffer_set = mxr_graph_buffer_set,
44993 .stream_set = mxr_graph_stream_set,
44994diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44995index b713403..53cb5ad 100644
44996--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44997+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44998@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44999 layer->update_buf = next;
45000 }
45001
45002- layer->ops.buffer_set(layer, layer->update_buf);
45003+ layer->ops->buffer_set(layer, layer->update_buf);
45004
45005 if (done && done != layer->shadow_buf)
45006 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45007diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45008index b4d2696..91df48e 100644
45009--- a/drivers/media/platform/s5p-tv/mixer_video.c
45010+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45011@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45012 layer->geo.src.height = layer->geo.src.full_height;
45013
45014 mxr_geometry_dump(mdev, &layer->geo);
45015- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45016+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45017 mxr_geometry_dump(mdev, &layer->geo);
45018 }
45019
45020@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45021 layer->geo.dst.full_width = mbus_fmt.width;
45022 layer->geo.dst.full_height = mbus_fmt.height;
45023 layer->geo.dst.field = mbus_fmt.field;
45024- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45025+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45026
45027 mxr_geometry_dump(mdev, &layer->geo);
45028 }
45029@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45030 /* set source size to highest accepted value */
45031 geo->src.full_width = max(geo->dst.full_width, pix->width);
45032 geo->src.full_height = max(geo->dst.full_height, pix->height);
45033- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45034+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45035 mxr_geometry_dump(mdev, &layer->geo);
45036 /* set cropping to total visible screen */
45037 geo->src.width = pix->width;
45038@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45039 geo->src.x_offset = 0;
45040 geo->src.y_offset = 0;
45041 /* assure consistency of geometry */
45042- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45043+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45044 mxr_geometry_dump(mdev, &layer->geo);
45045 /* set full size to lowest possible value */
45046 geo->src.full_width = 0;
45047 geo->src.full_height = 0;
45048- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45049+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45050 mxr_geometry_dump(mdev, &layer->geo);
45051
45052 /* returning results */
45053@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45054 target->width = s->r.width;
45055 target->height = s->r.height;
45056
45057- layer->ops.fix_geometry(layer, stage, s->flags);
45058+ layer->ops->fix_geometry(layer, stage, s->flags);
45059
45060 /* retrieve update selection rectangle */
45061 res.left = target->x_offset;
45062@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45063 mxr_output_get(mdev);
45064
45065 mxr_layer_update_output(layer);
45066- layer->ops.format_set(layer);
45067+ layer->ops->format_set(layer);
45068 /* enabling layer in hardware */
45069 spin_lock_irqsave(&layer->enq_slock, flags);
45070 layer->state = MXR_LAYER_STREAMING;
45071 spin_unlock_irqrestore(&layer->enq_slock, flags);
45072
45073- layer->ops.stream_set(layer, MXR_ENABLE);
45074+ layer->ops->stream_set(layer, MXR_ENABLE);
45075 mxr_streamer_get(mdev);
45076
45077 return 0;
45078@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
45079 spin_unlock_irqrestore(&layer->enq_slock, flags);
45080
45081 /* disabling layer in hardware */
45082- layer->ops.stream_set(layer, MXR_DISABLE);
45083+ layer->ops->stream_set(layer, MXR_DISABLE);
45084 /* remove one streamer */
45085 mxr_streamer_put(mdev);
45086 /* allow changes in output configuration */
45087@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45088
45089 void mxr_layer_release(struct mxr_layer *layer)
45090 {
45091- if (layer->ops.release)
45092- layer->ops.release(layer);
45093+ if (layer->ops->release)
45094+ layer->ops->release(layer);
45095 }
45096
45097 void mxr_base_layer_release(struct mxr_layer *layer)
45098@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45099
45100 layer->mdev = mdev;
45101 layer->idx = idx;
45102- layer->ops = *ops;
45103+ layer->ops = ops;
45104
45105 spin_lock_init(&layer->enq_slock);
45106 INIT_LIST_HEAD(&layer->enq_list);
45107diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45108index c9388c4..ce71ece 100644
45109--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45110+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45111@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45112 {
45113 struct mxr_layer *layer;
45114 int ret;
45115- struct mxr_layer_ops ops = {
45116+ static struct mxr_layer_ops ops = {
45117 .release = mxr_vp_layer_release,
45118 .buffer_set = mxr_vp_buffer_set,
45119 .stream_set = mxr_vp_stream_set,
45120diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45121index 82affae..42833ec 100644
45122--- a/drivers/media/radio/radio-cadet.c
45123+++ b/drivers/media/radio/radio-cadet.c
45124@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45125 unsigned char readbuf[RDS_BUFFER];
45126 int i = 0;
45127
45128+ if (count > RDS_BUFFER)
45129+ return -EFAULT;
45130 mutex_lock(&dev->lock);
45131 if (dev->rdsstat == 0)
45132 cadet_start_rds(dev);
45133@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45134 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45135 mutex_unlock(&dev->lock);
45136
45137- if (i && copy_to_user(data, readbuf, i))
45138- return -EFAULT;
45139+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45140+ i = -EFAULT;
45141+
45142 return i;
45143 }
45144
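cadet_read() stages RDS data in a fixed-size on-stack readbuf[RDS_BUFFER]; the added checks reject a request larger than the buffer before any work is done, and re-validate the fill count i before copy_to_user(), so neither a huge count nor a miscounted fill can walk off the stack buffer. (The patch returns -EFAULT for the oversized request; -EINVAL would arguably describe it better.) Condensed shape of the hardened function:

	static ssize_t cadet_read(struct file *file, char __user *data,
				  size_t count, loff_t *ppos)
	{
		unsigned char readbuf[RDS_BUFFER];
		int i = 0;

		if (count > sizeof(readbuf))	/* never promise more than we stage */
			return -EFAULT;
		/* ... fill readbuf[0..i) from dev->rdsbuf under dev->lock ... */
		if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
			i = -EFAULT;
		return i;
	}
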
45145diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45146index 5236035..c622c74 100644
45147--- a/drivers/media/radio/radio-maxiradio.c
45148+++ b/drivers/media/radio/radio-maxiradio.c
45149@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45150 /* TEA5757 pin mappings */
45151 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45152
45153-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45154+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45155
45156 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45157 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45158diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45159index 050b3bb..79f62b9 100644
45160--- a/drivers/media/radio/radio-shark.c
45161+++ b/drivers/media/radio/radio-shark.c
45162@@ -79,7 +79,7 @@ struct shark_device {
45163 u32 last_val;
45164 };
45165
45166-static atomic_t shark_instance = ATOMIC_INIT(0);
45167+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45168
45169 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45170 {
45171diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45172index 8654e0d..0608a64 100644
45173--- a/drivers/media/radio/radio-shark2.c
45174+++ b/drivers/media/radio/radio-shark2.c
45175@@ -74,7 +74,7 @@ struct shark_device {
45176 u8 *transfer_buffer;
45177 };
45178
45179-static atomic_t shark_instance = ATOMIC_INIT(0);
45180+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45181
45182 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45183 {
45184diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45185index dccf586..d5db411 100644
45186--- a/drivers/media/radio/radio-si476x.c
45187+++ b/drivers/media/radio/radio-si476x.c
45188@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45189 struct si476x_radio *radio;
45190 struct v4l2_ctrl *ctrl;
45191
45192- static atomic_t instance = ATOMIC_INIT(0);
45193+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45194
45195 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45196 if (!radio)
45197diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45198index 704397f..4d05977 100644
45199--- a/drivers/media/radio/wl128x/fmdrv_common.c
45200+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45201@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45202 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45203
45204 /* Radio Nr */
45205-static u32 radio_nr = -1;
45206+static int radio_nr = -1;
45207 module_param(radio_nr, int, 0444);
45208 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45209
45210diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45211index 9fd1527..8927230 100644
45212--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45213+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45214@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45215
45216 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45217 {
45218- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45219- char result[64];
45220- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45221- sizeof(result), 0);
45222+ char *buf;
45223+ char *result;
45224+ int retval;
45225+
45226+ buf = kmalloc(2, GFP_KERNEL);
45227+ if (buf == NULL)
45228+ return -ENOMEM;
45229+ result = kmalloc(64, GFP_KERNEL);
45230+ if (result == NULL) {
45231+ kfree(buf);
45232+ return -ENOMEM;
45233+ }
45234+
45235+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45236+ buf[1] = enable ? 1 : 0;
45237+
45238+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45239+
45240+ kfree(buf);
45241+ kfree(result);
45242+ return retval;
45243 }
45244
45245 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45246 {
45247- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45248- char state[3];
45249- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45250+ char *buf;
45251+ char *state;
45252+ int retval;
45253+
45254+ buf = kmalloc(2, GFP_KERNEL);
45255+ if (buf == NULL)
45256+ return -ENOMEM;
45257+ state = kmalloc(3, GFP_KERNEL);
45258+ if (state == NULL) {
45259+ kfree(buf);
45260+ return -ENOMEM;
45261+ }
45262+
45263+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45264+	buf[1] = enable ? 0 : 1;
45265+
45266+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45267+
45268+ kfree(buf);
45269+ kfree(state);
45270+ return retval;
45271 }
45272
45273 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45274 {
45275- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45276- char state[3];
45277+ char *query;
45278+ char *state;
45279 int ret;
45280+ query = kmalloc(1, GFP_KERNEL);
45281+ if (query == NULL)
45282+ return -ENOMEM;
45283+ state = kmalloc(3, GFP_KERNEL);
45284+ if (state == NULL) {
45285+ kfree(query);
45286+ return -ENOMEM;
45287+ }
45288+
45289+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45290
45291 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45292
45293- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45294- sizeof(state), 0);
45295+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45296 if (ret < 0) {
45297 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45298 "state info\n");
45299@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45300
45301 /* Copy this pointer as we are gonna need it in the release phase */
45302 cinergyt2_usb_device = adap->dev;
45303-
45304+ kfree(query);
45305+ kfree(state);
45306 return 0;
45307 }
45308
45309@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45310 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45311 {
45312 struct cinergyt2_state *st = d->priv;
45313- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45314+ u8 *key, *cmd;
45315 int i;
45316
45317+ cmd = kmalloc(1, GFP_KERNEL);
45318+ if (cmd == NULL)
45319+		return -ENOMEM;
45320+ key = kzalloc(5, GFP_KERNEL);
45321+ if (key == NULL) {
45322+ kfree(cmd);
45323+		return -ENOMEM;
45324+ }
45325+
45326+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45327+
45328 *state = REMOTE_NO_KEY_PRESSED;
45329
45330- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45331+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45332 if (key[4] == 0xff) {
45333 /* key repeat */
45334 st->rc_counter++;
45335@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45336 *event = d->last_event;
45337 deb_rc("repeat key, event %x\n",
45338 *event);
45339- return 0;
45340+ goto out;
45341 }
45342 }
45343 deb_rc("repeated key (non repeatable)\n");
45344 }
45345- return 0;
45346+ goto out;
45347 }
45348
45349 /* hack to pass checksum on the custom field */
45350@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45351
45352 deb_rc("key: %*ph\n", 5, key);
45353 }
45354+out:
45355+ kfree(cmd);
45356+ kfree(key);
45357 return 0;
45358 }
45359
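Every cinergyT2 call site above switches from on-stack buffers to kmalloc()'d ones because dvb_usb_generic_rw() hands the memory to the USB core, which DMAs into it: stack addresses are unsafe DMA targets under the stack hardening this patch enables (and on some architectures in general). The conversion pattern, condensed with a hypothetical command byte:

	static int example_usb_cmd(struct dvb_usb_device *d)
	{
		u8 *cmd, *reply;
		int ret;

		cmd = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-safe */
		if (cmd == NULL)
			return -ENOMEM;
		reply = kmalloc(3, GFP_KERNEL);
		if (reply == NULL) {
			kfree(cmd);
			return -ENOMEM;
		}

		cmd[0] = 0x42;			/* hypothetical opcode */
		ret = dvb_usb_generic_rw(d, cmd, 1, reply, 3, 0);

		kfree(cmd);
		kfree(reply);
		return ret;
	}
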
45360diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45361index c890fe4..f9b2ae6 100644
45362--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45363+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45364@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45365 fe_status_t *status)
45366 {
45367 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45368- struct dvbt_get_status_msg result;
45369- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45370+ struct dvbt_get_status_msg *result;
45371+ u8 *cmd;
45372 int ret;
45373
45374- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45375- sizeof(result), 0);
45376+ cmd = kmalloc(1, GFP_KERNEL);
45377+ if (cmd == NULL)
45378+ return -ENOMEM;
45379+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45380+ if (result == NULL) {
45381+ kfree(cmd);
45382+ return -ENOMEM;
45383+ }
45384+
45385+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45386+
45387+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45388+ sizeof(*result), 0);
45389 if (ret < 0)
45390- return ret;
45391+ goto out;
45392
45393 *status = 0;
45394
45395- if (0xffff - le16_to_cpu(result.gain) > 30)
45396+ if (0xffff - le16_to_cpu(result->gain) > 30)
45397 *status |= FE_HAS_SIGNAL;
45398- if (result.lock_bits & (1 << 6))
45399+ if (result->lock_bits & (1 << 6))
45400 *status |= FE_HAS_LOCK;
45401- if (result.lock_bits & (1 << 5))
45402+ if (result->lock_bits & (1 << 5))
45403 *status |= FE_HAS_SYNC;
45404- if (result.lock_bits & (1 << 4))
45405+ if (result->lock_bits & (1 << 4))
45406 *status |= FE_HAS_CARRIER;
45407- if (result.lock_bits & (1 << 1))
45408+ if (result->lock_bits & (1 << 1))
45409 *status |= FE_HAS_VITERBI;
45410
45411 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45412 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45413 *status &= ~FE_HAS_LOCK;
45414
45415- return 0;
45416+out:
45417+ kfree(cmd);
45418+ kfree(result);
45419+ return ret;
45420 }
45421
45422 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45423 {
45424 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45425- struct dvbt_get_status_msg status;
45426- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45427+ struct dvbt_get_status_msg *status;
45428+ char *cmd;
45429 int ret;
45430
45431- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45432- sizeof(status), 0);
45433+ cmd = kmalloc(1, GFP_KERNEL);
45434+ if (cmd == NULL)
45435+ return -ENOMEM;
45436+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45437+ if (status == NULL) {
45438+ kfree(cmd);
45439+ return -ENOMEM;
45440+ }
45441+
45442+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45443+
45444+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45445+ sizeof(*status), 0);
45446 if (ret < 0)
45447- return ret;
45448+ goto out;
45449
45450- *ber = le32_to_cpu(status.viterbi_error_rate);
45451+ *ber = le32_to_cpu(status->viterbi_error_rate);
45452+out:
45453+ kfree(cmd);
45454+ kfree(status);
45455 return 0;
45456 }
45457
45458 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45459 {
45460 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45461- struct dvbt_get_status_msg status;
45462- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45463+ struct dvbt_get_status_msg *status;
45464+ u8 *cmd;
45465 int ret;
45466
45467- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45468- sizeof(status), 0);
45469+ cmd = kmalloc(1, GFP_KERNEL);
45470+ if (cmd == NULL)
45471+ return -ENOMEM;
45472+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45473+ if (status == NULL) {
45474+ kfree(cmd);
45475+ return -ENOMEM;
45476+ }
45477+
45478+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45479+
45480+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45481+ sizeof(*status), 0);
45482 if (ret < 0) {
45483 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45484 ret);
45485- return ret;
45486+ goto out;
45487 }
45488- *unc = le32_to_cpu(status.uncorrected_block_count);
45489- return 0;
45490+ *unc = le32_to_cpu(status->uncorrected_block_count);
45491+
45492+out:
45493+ kfree(cmd);
45494+ kfree(status);
45495+ return ret;
45496 }
45497
45498 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45499 u16 *strength)
45500 {
45501 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45502- struct dvbt_get_status_msg status;
45503- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45504+ struct dvbt_get_status_msg *status;
45505+ char *cmd;
45506 int ret;
45507
45508- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45509- sizeof(status), 0);
45510+ cmd = kmalloc(1, GFP_KERNEL);
45511+ if (cmd == NULL)
45512+ return -ENOMEM;
45513+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45514+ if (status == NULL) {
45515+ kfree(cmd);
45516+ return -ENOMEM;
45517+ }
45518+
45519+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45520+
45521+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45522+ sizeof(*status), 0);
45523 if (ret < 0) {
45524 err("cinergyt2_fe_read_signal_strength() Failed!"
45525 " (Error=%d)\n", ret);
45526- return ret;
45527+ goto out;
45528 }
45529- *strength = (0xffff - le16_to_cpu(status.gain));
45530+ *strength = (0xffff - le16_to_cpu(status->gain));
45531+
45532+out:
45533+ kfree(cmd);
45534+ kfree(status);
45535 return 0;
45536 }
45537
45538 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45539 {
45540 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45541- struct dvbt_get_status_msg status;
45542- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45543+ struct dvbt_get_status_msg *status;
45544+ char *cmd;
45545 int ret;
45546
45547- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45548- sizeof(status), 0);
45549+ cmd = kmalloc(1, GFP_KERNEL);
45550+ if (cmd == NULL)
45551+ return -ENOMEM;
45552+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45553+ if (status == NULL) {
45554+ kfree(cmd);
45555+ return -ENOMEM;
45556+ }
45557+
45558+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45559+
45560+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45561+ sizeof(*status), 0);
45562 if (ret < 0) {
45563 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45564- return ret;
45565+ goto out;
45566 }
45567- *snr = (status.snr << 8) | status.snr;
45568- return 0;
45569+ *snr = (status->snr << 8) | status->snr;
45570+
45571+out:
45572+ kfree(cmd);
45573+ kfree(status);
45574+ return ret;
45575 }
45576
45577 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45578@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45579 {
45580 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45581 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45582- struct dvbt_set_parameters_msg param;
45583- char result[2];
45584+ struct dvbt_set_parameters_msg *param;
45585+ char *result;
45586 int err;
45587
45588- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45589- param.tps = cpu_to_le16(compute_tps(fep));
45590- param.freq = cpu_to_le32(fep->frequency / 1000);
45591- param.flags = 0;
45592+ result = kmalloc(2, GFP_KERNEL);
45593+ if (result == NULL)
45594+ return -ENOMEM;
45595+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45596+ if (param == NULL) {
45597+ kfree(result);
45598+ return -ENOMEM;
45599+ }
45600+
45601+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45602+ param->tps = cpu_to_le16(compute_tps(fep));
45603+ param->freq = cpu_to_le32(fep->frequency / 1000);
45604+ param->flags = 0;
45605
45606 switch (fep->bandwidth_hz) {
45607 default:
45608 case 8000000:
45609- param.bandwidth = 8;
45610+ param->bandwidth = 8;
45611 break;
45612 case 7000000:
45613- param.bandwidth = 7;
45614+ param->bandwidth = 7;
45615 break;
45616 case 6000000:
45617- param.bandwidth = 6;
45618+ param->bandwidth = 6;
45619 break;
45620 }
45621
45622 err = dvb_usb_generic_rw(state->d,
45623- (char *)&param, sizeof(param),
45624- result, sizeof(result), 0);
45625+ (char *)param, sizeof(*param),
45626+ result, 2, 0);
45627 if (err < 0)
45628 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45629
45630- return (err < 0) ? err : 0;
45631+ kfree(result);
45632+ kfree(param);
45633+ return err;
45634 }
45635
45636 static void cinergyt2_fe_release(struct dvb_frontend *fe)
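The four cinergyt2_fe_read_* hunks above share one transformation: buffers that used to live on the stack and were handed to dvb_usb_generic_rw() (i.e. used for USB transfers) are moved to kmalloc()'d memory, and every exit funnels through a single label that frees both allocations. Note that read_unc_blocks and read_snr were also changed to return ret from that label, while read_ber and read_signal_strength still end in return 0 and would report a failed transfer as success. Below is a minimal userspace sketch of the pattern, assuming a stand-in transfer function and a hypothetical status layout; it is not the driver's real API.

/* Sketch of the allocate/transfer/free pattern in these hunks: the
 * on-stack buffers become heap allocations, and all exits reach one
 * cleanup label so both buffers are always freed.  do_transfer() is a
 * stand-in for dvb_usb_generic_rw(); the struct is hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct status_msg { unsigned int viterbi_error_rate; };

/* stand-in for the USB transfer; pretends to fill the status reply */
static int do_transfer(const unsigned char *cmd, size_t cmd_len,
                       void *reply, size_t reply_len)
{
    (void)cmd; (void)cmd_len;
    memset(reply, 0, reply_len);
    return 0; /* 0 = success, negative = error */
}

static int read_ber(unsigned int *ber)
{
    struct status_msg *status;
    unsigned char *cmd;
    int ret;

    cmd = malloc(1);
    if (cmd == NULL)
        return -1;
    status = malloc(sizeof(*status));
    if (status == NULL) {
        free(cmd);
        return -1;
    }

    cmd[0] = 0x05; /* hypothetical GET_TUNER_STATUS opcode */

    ret = do_transfer(cmd, 1, status, sizeof(*status));
    if (ret < 0)
        goto out;

    *ber = status->viterbi_error_rate;
out:
    free(cmd);
    free(status);
    return ret;   /* propagates the error, unlike a hard-coded return 0 */
}

int main(void)
{
    unsigned int ber = 0;
    printf("read_ber -> %d, ber=%u\n", read_ber(&ber), ber);
    return 0;
}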
45637diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45638index 733a7ff..f8b52e3 100644
45639--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45640+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45641@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45642
45643 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45644 {
45645- struct hexline hx;
45646- u8 reset;
45647+ struct hexline *hx;
45648+ u8 *reset;
45649 int ret,pos=0;
45650
45651+ reset = kmalloc(1, GFP_KERNEL);
45652+ if (reset == NULL)
45653+ return -ENOMEM;
45654+
45655+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45656+ if (hx == NULL) {
45657+ kfree(reset);
45658+ return -ENOMEM;
45659+ }
45660+
45661 /* stop the CPU */
45662- reset = 1;
45663- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45664+ reset[0] = 1;
45665+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45666 err("could not stop the USB controller CPU.");
45667
45668- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45669- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45670- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45671+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45672+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45673+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45674
45675- if (ret != hx.len) {
45676+ if (ret != hx->len) {
45677 err("error while transferring firmware "
45678 "(transferred size: %d, block size: %d)",
45679- ret,hx.len);
45680+ ret,hx->len);
45681 ret = -EINVAL;
45682 break;
45683 }
45684 }
45685 if (ret < 0) {
45686 err("firmware download failed at %d with %d",pos,ret);
45687+ kfree(reset);
45688+ kfree(hx);
45689 return ret;
45690 }
45691
45692 if (ret == 0) {
45693 /* restart the CPU */
45694- reset = 0;
45695- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45696+ reset[0] = 0;
45697+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45698 err("could not restart the USB controller CPU.");
45699 ret = -EINVAL;
45700 }
45701 } else
45702 ret = -EIO;
45703
45704+ kfree(reset);
45705+ kfree(hx);
45706+
45707 return ret;
45708 }
45709 EXPORT_SYMBOL(usb_cypress_load_firmware);
45710diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45711index 1a3df10..57997a5 100644
45712--- a/drivers/media/usb/dvb-usb/dw2102.c
45713+++ b/drivers/media/usb/dvb-usb/dw2102.c
45714@@ -118,7 +118,7 @@ struct su3000_state {
45715
45716 struct s6x0_state {
45717 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45718-};
45719+} __no_const;
45720
45721 /* debug */
45722 static int dvb_usb_dw2102_debug;
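__no_const here is a grsecurity/PaX marker: the constification GCC plugin would otherwise treat a struct consisting only of function pointers as const and move its instances to read-only memory, but s6x0_state stores a saved hook at runtime, so it must stay writable. A small sketch of that runtime store, with illustrative names only:

/* Why a struct written at runtime cannot be constified: the saved
 * old_set_voltage pointer is installed after the frontend attaches,
 * so the structure must remain writable. */
#include <stdio.h>

struct frontend;
typedef int (*set_voltage_fn)(struct frontend *, int);

struct s6x0_state_like {
    set_voltage_fn old_set_voltage; /* patched in at attach time */
};

static int real_set_voltage(struct frontend *fe, int v)
{
    (void)fe;
    printf("set voltage %d\n", v);
    return 0;
}

int main(void)
{
    struct s6x0_state_like st = { 0 };
    /* runtime assignment -- this is what forbids making the struct const */
    st.old_set_voltage = real_set_voltage;
    return st.old_set_voltage(NULL, 13);
}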
45723diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45724index 5801ae7..83f71fa 100644
45725--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45726+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45727@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45728 static int technisat_usb2_i2c_access(struct usb_device *udev,
45729 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45730 {
45731- u8 b[64];
45732- int ret, actual_length;
45733+ u8 *b = kmalloc(64, GFP_KERNEL);
45734+ int ret, actual_length, error = 0;
45735+
45736+ if (b == NULL)
45737+ return -ENOMEM;
45738
45739 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45740 debug_dump(tx, txlen, deb_i2c);
45741@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45742
45743 if (ret < 0) {
45744 err("i2c-error: out failed %02x = %d", device_addr, ret);
45745- return -ENODEV;
45746+ error = -ENODEV;
45747+ goto out;
45748 }
45749
45750 ret = usb_bulk_msg(udev,
45751@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45752 b, 64, &actual_length, 1000);
45753 if (ret < 0) {
45754 err("i2c-error: in failed %02x = %d", device_addr, ret);
45755- return -ENODEV;
45756+ error = -ENODEV;
45757+ goto out;
45758 }
45759
45760 if (b[0] != I2C_STATUS_OK) {
45761@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45762 /* handle tuner-i2c-nak */
45763 if (!(b[0] == I2C_STATUS_NAK &&
45764 device_addr == 0x60
45765- /* && device_is_technisat_usb2 */))
45766- return -ENODEV;
45767+ /* && device_is_technisat_usb2 */)) {
45768+ error = -ENODEV;
45769+ goto out;
45770+ }
45771 }
45772
45773 deb_i2c("status: %d, ", b[0]);
45774@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45775
45776 deb_i2c("\n");
45777
45778- return 0;
45779+out:
45780+ kfree(b);
45781+ return error;
45782 }
45783
45784 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45785@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45786 {
45787 int ret;
45788
45789- u8 led[8] = {
45790- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45791- 0
45792- };
45793+ u8 *led = kzalloc(8, GFP_KERNEL);
45794+
45795+ if (led == NULL)
45796+ return -ENOMEM;
45797
45798 if (disable_led_control && state != TECH_LED_OFF)
45799 return 0;
45800
45801+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45802+
45803 switch (state) {
45804 case TECH_LED_ON:
45805 led[1] = 0x82;
45806@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45807 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45808 USB_TYPE_VENDOR | USB_DIR_OUT,
45809 0, 0,
45810- led, sizeof(led), 500);
45811+ led, 8, 500);
45812
45813 mutex_unlock(&d->i2c_mutex);
45814+
45815+ kfree(led);
45816+
45817 return ret;
45818 }
45819
45820 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45821 {
45822 int ret;
45823- u8 b = 0;
45824+ u8 *b = kzalloc(1, GFP_KERNEL);
45825+
45826+ if (b == NULL)
45827+ return -ENOMEM;
45828
45829 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45830 return -EAGAIN;
45831@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45832 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45833 USB_TYPE_VENDOR | USB_DIR_OUT,
45834 (red << 8) | green, 0,
45835- &b, 1, 500);
45836+ b, 1, 500);
45837
45838 mutex_unlock(&d->i2c_mutex);
45839
45840+ kfree(b);
45841+
45842 return ret;
45843 }
45844
45845@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45846 struct dvb_usb_device_description **desc, int *cold)
45847 {
45848 int ret;
45849- u8 version[3];
45850+ u8 *version = kmalloc(3, GFP_KERNEL);
45851
45852 /* first select the interface */
45853 if (usb_set_interface(udev, 0, 1) != 0)
45854@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45855
45856 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45857
45858+ if (version == NULL)
45859+ return 0;
45860+
45861 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45862 GET_VERSION_INFO_VENDOR_REQUEST,
45863 USB_TYPE_VENDOR | USB_DIR_IN,
45864 0, 0,
45865- version, sizeof(version), 500);
45866+ version, 3, 500);
45867
45868 if (ret < 0)
45869 *cold = 1;
45870@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45871 *cold = 0;
45872 }
45873
45874+ kfree(version);
45875+
45876 return 0;
45877 }
45878
45879@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45880
45881 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45882 {
45883- u8 buf[62], *b;
45884+ u8 *buf, *b;
45885 int ret;
45886 struct ir_raw_event ev;
45887
45888+ buf = kmalloc(62, GFP_KERNEL);
45889+
45890+ if (buf == NULL)
45891+ return -ENOMEM;
45892+
45893 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45894 buf[1] = 0x08;
45895 buf[2] = 0x8f;
45896@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45897 GET_IR_DATA_VENDOR_REQUEST,
45898 USB_TYPE_VENDOR | USB_DIR_IN,
45899 0x8080, 0,
45900- buf, sizeof(buf), 500);
45901+ buf, 62, 500);
45902
45903 unlock:
45904 mutex_unlock(&d->i2c_mutex);
45905
45906- if (ret < 0)
45907+ if (ret < 0) {
45908+ kfree(buf);
45909 return ret;
45910+ }
45911
45912- if (ret == 1)
45913+ if (ret == 1) {
45914+ kfree(buf);
45915 return 0; /* no key pressed */
45916+ }
45917
45918 /* decoding */
45919 b = buf+1;
45920@@ -656,6 +689,8 @@ unlock:
45921
45922 ir_raw_event_handle(d->rc_dev);
45923
45924+ kfree(buf);
45925+
45926 return 1;
45927 }
45928
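These technisat hunks apply the same stack-to-heap conversion and also show its main hazard: every early return between the allocation and the final kfree() must free the buffer itself. In the patched technisat_usb2_set_led(), the disable_led_control early return sits after the kzalloc() and so leaks led, and technisat_usb2_set_led_timer() similarly leaks b when mutex_lock_interruptible() fails. A userspace sketch of the leak-free single-exit structuring, with hypothetical request values:

/* Once a buffer is heap-allocated, early exits must reach the cleanup
 * too; routing them through one label removes this class of leak.
 * All names and values are illustrative. */
#include <stdlib.h>

static int send_led_request(const unsigned char *led, size_t len)
{
    (void)led; (void)len;
    return 0;
}

static int set_led(int disabled, int state)
{
    unsigned char *led;
    int ret;

    led = calloc(1, 8);
    if (led == NULL)
        return -1;

    if (disabled && state != 0) {
        ret = 0;        /* early exit still reaches the cleanup below */
        goto out;
    }

    led[0] = 0x2a;      /* hypothetical vendor-request id */
    led[1] = state ? 0x82 : 0;
    ret = send_led_request(led, 8);
out:
    free(led);
    return ret;
}

int main(void)
{
    return set_led(1, 1);
}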
45929diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45930index af63543..0436f20 100644
45931--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45932+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45933@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45934 * by passing a very big num_planes value */
45935 uplane = compat_alloc_user_space(num_planes *
45936 sizeof(struct v4l2_plane));
45937- kp->m.planes = (__force struct v4l2_plane *)uplane;
45938+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
45939
45940 while (--num_planes >= 0) {
45941 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45942@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45943 if (num_planes == 0)
45944 return 0;
45945
45946- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
45947+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45948 if (get_user(p, &up->m.planes))
45949 return -EFAULT;
45950 uplane32 = compat_ptr(p);
45951@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45952 get_user(kp->flags, &up->flags) ||
45953 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
45954 return -EFAULT;
45955- kp->base = (__force void *)compat_ptr(tmp);
45956+ kp->base = (__force_kernel void *)compat_ptr(tmp);
45957 return 0;
45958 }
45959
45960@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45961 n * sizeof(struct v4l2_ext_control32)))
45962 return -EFAULT;
45963 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45964- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
45965+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
45966 while (--n >= 0) {
45967 u32 id;
45968
45969@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45970 {
45971 struct v4l2_ext_control32 __user *ucontrols;
45972 struct v4l2_ext_control __user *kcontrols =
45973- (__force struct v4l2_ext_control __user *)kp->controls;
45974+ (struct v4l2_ext_control __force_user *)kp->controls;
45975 int n = kp->count;
45976 compat_caddr_t p;
45977
45978@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
45979 get_user(tmp, &up->edid) ||
45980 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45981 return -EFAULT;
45982- kp->edid = (__force u8 *)compat_ptr(tmp);
45983+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
45984 return 0;
45985 }
45986
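The v4l2-compat-ioctl32.c hunks only swap cast annotations: under sparse, __user pointers live in a separate address space, and __force_user/__force_kernel are the PaX-flavoured combined casts for moving a pointer between the user and kernel spaces explicitly. A sketch of how such annotations are defined so they vanish under a normal compiler (the macro definitions are illustrative of the kernel's, not copied from it):

/* Under sparse the attributes create a distinct address space that
 * needs an explicit __force cast to cross; under gcc they compile
 * away, which is why this builds as plain C. */
#include <stdio.h>

#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force       __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user   __force __user
#define __force_kernel __force

int main(void)
{
    int kbuf = 42;
    /* pretend kbuf sits at a user address: cast across address spaces */
    int __user *uptr = (int __force_user *)&kbuf;
    /* and back again when the kernel wants to dereference it */
    int *kptr = (int __force_kernel *)uptr;
    printf("%d\n", *kptr);
    return 0;
}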
45987diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45988index 015f92a..59e311e 100644
45989--- a/drivers/media/v4l2-core/v4l2-device.c
45990+++ b/drivers/media/v4l2-core/v4l2-device.c
45991@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45992 EXPORT_SYMBOL_GPL(v4l2_device_put);
45993
45994 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45995- atomic_t *instance)
45996+ atomic_unchecked_t *instance)
45997 {
45998- int num = atomic_inc_return(instance) - 1;
45999+ int num = atomic_inc_return_unchecked(instance) - 1;
46000 int len = strlen(basename);
46001
46002 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
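atomic_unchecked_t is PaX's variant of atomic_t that is exempt from reference-counter overflow detection; a device-instance counter like this one is pure numbering, where wrapping is harmless. A userspace sketch of the numbering logic using C11 atomics (the real function also inserts a '-' when the base name already ends in a digit, which is omitted here):

/* Atomically take the next instance number and append it to the base
 * name, as v4l2_device_set_name() does.  atomic_fetch_add() returns
 * the previous value, matching atomic_inc_return(...) - 1. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int instance = 0;

static void set_name(char *dst, size_t len, const char *basename)
{
    int num = atomic_fetch_add(&instance, 1);
    snprintf(dst, len, "%s%d", basename, num);
}

int main(void)
{
    char name[32];
    set_name(name, sizeof(name), "video");
    puts(name);                   /* video0 */
    set_name(name, sizeof(name), "video");
    puts(name);                   /* video1 */
    return 0;
}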
46003diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46004index faac2f4..e39dcd9 100644
46005--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46006+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46007@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
46008 struct file *file, void *fh, void *p);
46009 } u;
46010 void (*debug)(const void *arg, bool write_only);
46011-};
46012+} __do_const;
46013+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46014
46015 /* This control needs a priority check */
46016 #define INFO_FL_PRIO (1 << 0)
46017@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
46018 struct video_device *vfd = video_devdata(file);
46019 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46020 bool write_only = false;
46021- struct v4l2_ioctl_info default_info;
46022+ v4l2_ioctl_info_no_const default_info;
46023 const struct v4l2_ioctl_info *info;
46024 void *fh = file->private_data;
46025 struct v4l2_fh *vfh = NULL;
46026@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46027 ret = -EINVAL;
46028 break;
46029 }
46030- *user_ptr = (void __user *)buf->m.planes;
46031+ *user_ptr = (void __force_user *)buf->m.planes;
46032 *kernel_ptr = (void **)&buf->m.planes;
46033 *array_size = sizeof(struct v4l2_plane) * buf->length;
46034 ret = 1;
46035@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46036 ret = -EINVAL;
46037 break;
46038 }
46039- *user_ptr = (void __user *)edid->edid;
46040+ *user_ptr = (void __force_user *)edid->edid;
46041 *kernel_ptr = (void **)&edid->edid;
46042 *array_size = edid->blocks * 128;
46043 ret = 1;
46044@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46045 ret = -EINVAL;
46046 break;
46047 }
46048- *user_ptr = (void __user *)ctrls->controls;
46049+ *user_ptr = (void __force_user *)ctrls->controls;
46050 *kernel_ptr = (void **)&ctrls->controls;
46051 *array_size = sizeof(struct v4l2_ext_control)
46052 * ctrls->count;
46053@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46054 }
46055
46056 if (has_array_args) {
46057- *kernel_ptr = (void __force *)user_ptr;
46058+ *kernel_ptr = (void __force_kernel *)user_ptr;
46059 if (copy_to_user(user_ptr, mbuf, array_size))
46060 err = -EFAULT;
46061 goto out_array_args;
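__do_const asks the constification plugin to place the v4l2_ioctl_info tables in read-only memory, while the __no_const typedef gives __video_do_ioctl() a writable local (default_info) of the same shape. A plain-C sketch of that split, with made-up entries:

/* A normally-const dispatch table plus one writable scratch entry of
 * the same type: the table can live in .rodata, the local cannot. */
#include <stdio.h>

struct ioctl_info {
    unsigned int cmd;
    int (*func)(int arg);
};

static int do_querycap(int arg) { return printf("querycap(%d)\n", arg); }
static int do_streamon(int arg) { return printf("streamon(%d)\n", arg); }

/* never written: candidate for the read-only data section */
static const struct ioctl_info table[] = {
    { 1, do_querycap },
    { 2, do_streamon },
};

int main(void)
{
    struct ioctl_info default_info = { 99, do_querycap }; /* writable */
    const struct ioctl_info *info = &table[1];
    info->func(0);
    default_info.func(1);
    return 0;
}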
46062diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
46063index 24696f5..3637780 100644
46064--- a/drivers/memory/omap-gpmc.c
46065+++ b/drivers/memory/omap-gpmc.c
46066@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
46067 };
46068
46069 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
46070-static struct irq_chip gpmc_irq_chip;
46071 static int gpmc_irq_start;
46072
46073 static struct resource gpmc_mem_root;
46074@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
46075
46076 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
46077
46078+static struct irq_chip gpmc_irq_chip = {
46079+ .name = "gpmc",
46080+ .irq_startup = gpmc_irq_noop_ret,
46081+ .irq_enable = gpmc_irq_enable,
46082+ .irq_disable = gpmc_irq_disable,
46083+ .irq_shutdown = gpmc_irq_noop,
46084+ .irq_ack = gpmc_irq_noop,
46085+ .irq_mask = gpmc_irq_noop,
46086+ .irq_unmask = gpmc_irq_noop,
46087+};
46088+
46089 static int gpmc_setup_irq(void)
46090 {
46091 int i;
46092@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46093 return gpmc_irq_start;
46094 }
46095
46096- gpmc_irq_chip.name = "gpmc";
46097- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46098- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46099- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46100- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46101- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46102- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46103- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46104-
46105 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46106 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46107
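The omap-gpmc hunk replaces eight runtime field assignments with one static designated initializer, so gpmc_irq_chip is fully formed at compile time and never needs to be written, a precondition for placing it in read-only memory. A sketch with illustrative types:

/* One static initializer instead of per-field stores at probe time:
 * the object is complete before main() runs and can be const. */
#include <stdio.h>

struct irq_chip_like {
    const char *name;
    void (*ack)(int irq);
    void (*mask)(int irq);
};

static void noop(int irq) { (void)irq; }

static const struct irq_chip_like chip = {
    .name = "gpmc",
    .ack  = noop,
    .mask = noop,
};

int main(void)
{
    printf("%s\n", chip.name);
    chip.ack(0);
    return 0;
}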
46108diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46109index 187f836..679544b 100644
46110--- a/drivers/message/fusion/mptbase.c
46111+++ b/drivers/message/fusion/mptbase.c
46112@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46113 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46114 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46115
46116+#ifdef CONFIG_GRKERNSEC_HIDESYM
46117+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46118+#else
46119 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46120 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46121+#endif
46122+
46123 /*
46124 * Rounding UP to nearest 4-kB boundary here...
46125 */
46126@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46127 ioc->facts.GlobalCredits);
46128
46129 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46130+#ifdef CONFIG_GRKERNSEC_HIDESYM
46131+ NULL, NULL);
46132+#else
46133 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46134+#endif
46135 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46136 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46137 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
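Under CONFIG_GRKERNSEC_HIDESYM the proc output prints NULL instead of real kernel and DMA addresses, so /proc readers cannot harvest pointers that would help defeat address-space randomization. A sketch of the same conditional, with a stand-in config macro:

/* When the hardening option is on, addresses printed to user-visible
 * files are replaced with NULL. */
#include <stdio.h>

#define CONFIG_HIDESYM 1   /* flip to 0 to see the real address */

static void show_frames(const void *req_frames)
{
#if CONFIG_HIDESYM
    (void)req_frames;                       /* never leaked to the log */
    printf("RequestFrames @ %p\n", (void *)NULL);
#else
    printf("RequestFrames @ %p\n", req_frames);
#endif
}

int main(void)
{
    int dummy;
    show_frames(&dummy);
    return 0;
}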
46138diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46139index 5bdaae1..eced16f 100644
46140--- a/drivers/message/fusion/mptsas.c
46141+++ b/drivers/message/fusion/mptsas.c
46142@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46143 return 0;
46144 }
46145
46146+static inline void
46147+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46148+{
46149+ if (phy_info->port_details) {
46150+ phy_info->port_details->rphy = rphy;
46151+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46152+ ioc->name, rphy));
46153+ }
46154+
46155+ if (rphy) {
46156+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46157+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46158+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46159+ ioc->name, rphy, rphy->dev.release));
46160+ }
46161+}
46162+
46163 /* no mutex */
46164 static void
46165 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46166@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46167 return NULL;
46168 }
46169
46170-static inline void
46171-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46172-{
46173- if (phy_info->port_details) {
46174- phy_info->port_details->rphy = rphy;
46175- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46176- ioc->name, rphy));
46177- }
46178-
46179- if (rphy) {
46180- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46181- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46182- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46183- ioc->name, rphy, rphy->dev.release));
46184- }
46185-}
46186-
46187 static inline struct sas_port *
46188 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46189 {
46190diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46191index b7d87cd..3fb36da 100644
46192--- a/drivers/message/i2o/i2o_proc.c
46193+++ b/drivers/message/i2o/i2o_proc.c
46194@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46195 "Array Controller Device"
46196 };
46197
46198-static char *chtostr(char *tmp, u8 *chars, int n)
46199-{
46200- tmp[0] = 0;
46201- return strncat(tmp, (char *)chars, n);
46202-}
46203-
46204 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46205 char *group)
46206 {
46207@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46208 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46209 {
46210 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46211- static u32 work32[5];
46212- static u8 *work8 = (u8 *) work32;
46213- static u16 *work16 = (u16 *) work32;
46214+ u32 work32[5];
46215+ u8 *work8 = (u8 *) work32;
46216+ u16 *work16 = (u16 *) work32;
46217 int token;
46218 u32 hwcap;
46219
46220@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46221 } *result;
46222
46223 i2o_exec_execute_ddm_table ddm_table;
46224- char tmp[28 + 1];
46225
46226 result = kmalloc(sizeof(*result), GFP_KERNEL);
46227 if (!result)
46228@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46229
46230 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46231 seq_printf(seq, "%-#8x", ddm_table.module_id);
46232- seq_printf(seq, "%-29s",
46233- chtostr(tmp, ddm_table.module_name_version, 28));
46234+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46235 seq_printf(seq, "%9d ", ddm_table.data_size);
46236 seq_printf(seq, "%8d", ddm_table.code_size);
46237
46238@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46239
46240 i2o_driver_result_table *result;
46241 i2o_driver_store_table *dst;
46242- char tmp[28 + 1];
46243
46244 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46245 if (result == NULL)
46246@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46247
46248 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46249 seq_printf(seq, "%-#8x", dst->module_id);
46250- seq_printf(seq, "%-29s",
46251- chtostr(tmp, dst->module_name_version, 28));
46252- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46253+ seq_printf(seq, "%-.28s", dst->module_name_version);
46254+ seq_printf(seq, "%-.8s", dst->date);
46255 seq_printf(seq, "%8d ", dst->module_size);
46256 seq_printf(seq, "%8d ", dst->mpb_size);
46257 seq_printf(seq, "0x%04x", dst->module_flags);
46258@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46259 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46260 {
46261 struct i2o_device *d = (struct i2o_device *)seq->private;
46262- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46263+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46264 // == (allow) 512d bytes (max)
46265- static u16 *work16 = (u16 *) work32;
46266+ u16 *work16 = (u16 *) work32;
46267 int token;
46268- char tmp[16 + 1];
46269
46270 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46271
46272@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46273 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46274 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46275 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46276- seq_printf(seq, "Vendor info : %s\n",
46277- chtostr(tmp, (u8 *) (work32 + 2), 16));
46278- seq_printf(seq, "Product info : %s\n",
46279- chtostr(tmp, (u8 *) (work32 + 6), 16));
46280- seq_printf(seq, "Description : %s\n",
46281- chtostr(tmp, (u8 *) (work32 + 10), 16));
46282- seq_printf(seq, "Product rev. : %s\n",
46283- chtostr(tmp, (u8 *) (work32 + 14), 8));
46284+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46285+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46286+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46287+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46288
46289 seq_printf(seq, "Serial number : ");
46290 print_serial_number(seq, (u8 *) (work32 + 16),
46291@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46292 u8 pad[256]; // allow up to 256 byte (max) serial number
46293 } result;
46294
46295- char tmp[24 + 1];
46296-
46297 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46298
46299 if (token < 0) {
46300@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46301 }
46302
46303 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46304- seq_printf(seq, "Module name : %s\n",
46305- chtostr(tmp, result.module_name, 24));
46306- seq_printf(seq, "Module revision : %s\n",
46307- chtostr(tmp, result.module_rev, 8));
46308+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46309+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46310
46311 seq_printf(seq, "Serial number : ");
46312 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46313@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46314 u8 instance_number[4];
46315 } result;
46316
46317- char tmp[64 + 1];
46318-
46319 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46320
46321 if (token < 0) {
46322@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46323 return 0;
46324 }
46325
46326- seq_printf(seq, "Device name : %s\n",
46327- chtostr(tmp, result.device_name, 64));
46328- seq_printf(seq, "Service name : %s\n",
46329- chtostr(tmp, result.service_name, 64));
46330- seq_printf(seq, "Physical name : %s\n",
46331- chtostr(tmp, result.physical_location, 64));
46332- seq_printf(seq, "Instance number : %s\n",
46333- chtostr(tmp, result.instance_number, 4));
46334+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46335+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46336+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46337+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46338
46339 return 0;
46340 }
46341@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46342 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46343 {
46344 struct i2o_device *d = (struct i2o_device *)seq->private;
46345- static u32 work32[12];
46346- static u16 *work16 = (u16 *) work32;
46347- static u8 *work8 = (u8 *) work32;
46348+ u32 work32[12];
46349+ u16 *work16 = (u16 *) work32;
46350+ u8 *work8 = (u8 *) work32;
46351 int token;
46352
46353 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
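Two independent fixes run through the i2o_proc.c hunks: the static scratch buffers become ordinary locals (the static ones were shared across concurrent readers), and the chtostr() helper, which strncat'd a fixed-width field into a temporary, is replaced by a printf precision such as %.28s, which bounds the read by itself. A userspace sketch of the precision trick on a non-NUL-terminated field:

/* A fixed-width, not necessarily NUL-terminated byte field can be
 * printed directly: the precision stops the read at the width or at
 * the first NUL, so no temporary buffer is needed. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char module_name_version[28];
    /* deliberately fill the whole field with no terminating NUL */
    memset(module_name_version, 'x', sizeof(module_name_version));
    memcpy(module_name_version, "i2o_block v1.0", 14);

    /* precision bounds the read: at most 28 bytes are consumed */
    printf("%-.28s\n", module_name_version);
    return 0;
}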
46354diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46355index 92752fb..a7494f6 100644
46356--- a/drivers/message/i2o/iop.c
46357+++ b/drivers/message/i2o/iop.c
46358@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46359
46360 spin_lock_irqsave(&c->context_list_lock, flags);
46361
46362- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46363- atomic_inc(&c->context_list_counter);
46364+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46365+ atomic_inc_unchecked(&c->context_list_counter);
46366
46367- entry->context = atomic_read(&c->context_list_counter);
46368+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46369
46370 list_add(&entry->list, &c->context_list);
46371
46372@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46373
46374 #if BITS_PER_LONG == 64
46375 spin_lock_init(&c->context_list_lock);
46376- atomic_set(&c->context_list_counter, 0);
46377+ atomic_set_unchecked(&c->context_list_counter, 0);
46378 INIT_LIST_HEAD(&c->context_list);
46379 #endif
46380
46381diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46382index 9a8e185..27ff17d 100644
46383--- a/drivers/mfd/ab8500-debugfs.c
46384+++ b/drivers/mfd/ab8500-debugfs.c
46385@@ -100,7 +100,7 @@ static int irq_last;
46386 static u32 *irq_count;
46387 static int num_irqs;
46388
46389-static struct device_attribute **dev_attr;
46390+static device_attribute_no_const **dev_attr;
46391 static char **event_name;
46392
46393 static u8 avg_sample = SAMPLE_16;
46394diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46395index c880c89..45a7c68 100644
46396--- a/drivers/mfd/max8925-i2c.c
46397+++ b/drivers/mfd/max8925-i2c.c
46398@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46399 const struct i2c_device_id *id)
46400 {
46401 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46402- static struct max8925_chip *chip;
46403+ struct max8925_chip *chip;
46404 struct device_node *node = client->dev.of_node;
46405
46406 if (node && !pdata) {
46407diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46408index 7612d89..70549c2 100644
46409--- a/drivers/mfd/tps65910.c
46410+++ b/drivers/mfd/tps65910.c
46411@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46412 struct tps65910_platform_data *pdata)
46413 {
46414 int ret = 0;
46415- static struct regmap_irq_chip *tps6591x_irqs_chip;
46416+ struct regmap_irq_chip *tps6591x_irqs_chip;
46417
46418 if (!irq) {
46419 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46420diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46421index 1b772ef..01e77d33 100644
46422--- a/drivers/mfd/twl4030-irq.c
46423+++ b/drivers/mfd/twl4030-irq.c
46424@@ -34,6 +34,7 @@
46425 #include <linux/of.h>
46426 #include <linux/irqdomain.h>
46427 #include <linux/i2c/twl.h>
46428+#include <asm/pgtable.h>
46429
46430 #include "twl-core.h"
46431
46432@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46433 * Install an irq handler for each of the SIH modules;
46434 * clone dummy irq_chip since PIH can't *do* anything
46435 */
46436- twl4030_irq_chip = dummy_irq_chip;
46437- twl4030_irq_chip.name = "twl4030";
46438+ pax_open_kernel();
46439+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46440+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46441
46442- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46443+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46444+ pax_close_kernel();
46445
46446 for (i = irq_base; i < irq_end; i++) {
46447 irq_set_chip_and_handler(i, &twl4030_irq_chip,
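pax_open_kernel()/pax_close_kernel() bracket the rare legitimate writes to structures that the constification plugin has moved to read-only memory; the stores themselves go through explicit casts that strip the const the compiler sees. The same bracket reappears below in the c2port, sunxi_sid, mmci, omap_hsmmc and sdhci hunks. A sketch with mock open/close functions (the real ones toggle hardware write protection):

/* Writes to a conceptually read-only ops object, bracketed by calls
 * that would lift and restore write protection in the kernel. */
#include <stdio.h>
#include <string.h>

struct irq_chip_like {
    const char *name;
    void (*ack)(int irq);
};

static void noop_ack(int irq) { (void)irq; }

static const struct irq_chip_like dummy_irq_chip = { "dummy", noop_ack };

/* imagine this object lives in a read-only data section */
static struct irq_chip_like twl4030_irq_chip;

static void pax_open_kernel(void)  { /* would lift write protection */ }
static void pax_close_kernel(void) { /* would restore it */ }

int main(void)
{
    pax_open_kernel();
    memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip,
           sizeof twl4030_irq_chip);
    *(const char **)&twl4030_irq_chip.name = "twl4030";
    pax_close_kernel();

    printf("%s\n", twl4030_irq_chip.name);
    twl4030_irq_chip.ack(0);
    return 0;
}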
46448diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46449index 464419b..64bae8d 100644
46450--- a/drivers/misc/c2port/core.c
46451+++ b/drivers/misc/c2port/core.c
46452@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46453 goto error_idr_alloc;
46454 c2dev->id = ret;
46455
46456- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46457+ pax_open_kernel();
46458+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46459+ pax_close_kernel();
46460
46461 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46462 "c2port%d", c2dev->id);
46463diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46464index 8385177..2f54635 100644
46465--- a/drivers/misc/eeprom/sunxi_sid.c
46466+++ b/drivers/misc/eeprom/sunxi_sid.c
46467@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46468
46469 platform_set_drvdata(pdev, sid_data);
46470
46471- sid_bin_attr.size = sid_data->keysize;
46472+ pax_open_kernel();
46473+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46474+ pax_close_kernel();
46475 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46476 return -ENODEV;
46477
46478diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46479index 36f5d52..32311c3 100644
46480--- a/drivers/misc/kgdbts.c
46481+++ b/drivers/misc/kgdbts.c
46482@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46483 char before[BREAK_INSTR_SIZE];
46484 char after[BREAK_INSTR_SIZE];
46485
46486- probe_kernel_read(before, (char *)kgdbts_break_test,
46487+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46488 BREAK_INSTR_SIZE);
46489 init_simple_test();
46490 ts.tst = plant_and_detach_test;
46491@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46492 /* Activate test with initial breakpoint */
46493 if (!is_early)
46494 kgdb_breakpoint();
46495- probe_kernel_read(after, (char *)kgdbts_break_test,
46496+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46497 BREAK_INSTR_SIZE);
46498 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46499 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
46500diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46501index 3ef4627..8d00486 100644
46502--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46503+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46504@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46505 * the lid is closed. This leads to interrupts as soon as a little move
46506 * is done.
46507 */
46508- atomic_inc(&lis3->count);
46509+ atomic_inc_unchecked(&lis3->count);
46510
46511 wake_up_interruptible(&lis3->misc_wait);
46512 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46513@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46514 if (lis3->pm_dev)
46515 pm_runtime_get_sync(lis3->pm_dev);
46516
46517- atomic_set(&lis3->count, 0);
46518+ atomic_set_unchecked(&lis3->count, 0);
46519 return 0;
46520 }
46521
46522@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46523 add_wait_queue(&lis3->misc_wait, &wait);
46524 while (true) {
46525 set_current_state(TASK_INTERRUPTIBLE);
46526- data = atomic_xchg(&lis3->count, 0);
46527+ data = atomic_xchg_unchecked(&lis3->count, 0);
46528 if (data)
46529 break;
46530
46531@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46532 struct lis3lv02d, miscdev);
46533
46534 poll_wait(file, &lis3->misc_wait, wait);
46535- if (atomic_read(&lis3->count))
46536+ if (atomic_read_unchecked(&lis3->count))
46537 return POLLIN | POLLRDNORM;
46538 return 0;
46539 }
46540diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46541index c439c82..1f20f57 100644
46542--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46543+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46544@@ -297,7 +297,7 @@ struct lis3lv02d {
46545 struct input_polled_dev *idev; /* input device */
46546 struct platform_device *pdev; /* platform device */
46547 struct regulator_bulk_data regulators[2];
46548- atomic_t count; /* interrupt count after last read */
46549+ atomic_unchecked_t count; /* interrupt count after last read */
46550 union axis_conversion ac; /* hw -> logical axis */
46551 int mapped_btns[3];
46552
46553diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46554index 2f30bad..c4c13d0 100644
46555--- a/drivers/misc/sgi-gru/gruhandles.c
46556+++ b/drivers/misc/sgi-gru/gruhandles.c
46557@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46558 unsigned long nsec;
46559
46560 nsec = CLKS2NSEC(clks);
46561- atomic_long_inc(&mcs_op_statistics[op].count);
46562- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46563+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46564+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46565 if (mcs_op_statistics[op].max < nsec)
46566 mcs_op_statistics[op].max = nsec;
46567 }
46568diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46569index 4f76359..cdfcb2e 100644
46570--- a/drivers/misc/sgi-gru/gruprocfs.c
46571+++ b/drivers/misc/sgi-gru/gruprocfs.c
46572@@ -32,9 +32,9 @@
46573
46574 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46575
46576-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46577+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46578 {
46579- unsigned long val = atomic_long_read(v);
46580+ unsigned long val = atomic_long_read_unchecked(v);
46581
46582 seq_printf(s, "%16lu %s\n", val, id);
46583 }
46584@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46585
46586 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46587 for (op = 0; op < mcsop_last; op++) {
46588- count = atomic_long_read(&mcs_op_statistics[op].count);
46589- total = atomic_long_read(&mcs_op_statistics[op].total);
46590+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46591+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46592 max = mcs_op_statistics[op].max;
46593 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46594 count ? total / count : 0, max);
46595diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46596index 5c3ce24..4915ccb 100644
46597--- a/drivers/misc/sgi-gru/grutables.h
46598+++ b/drivers/misc/sgi-gru/grutables.h
46599@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46600 * GRU statistics.
46601 */
46602 struct gru_stats_s {
46603- atomic_long_t vdata_alloc;
46604- atomic_long_t vdata_free;
46605- atomic_long_t gts_alloc;
46606- atomic_long_t gts_free;
46607- atomic_long_t gms_alloc;
46608- atomic_long_t gms_free;
46609- atomic_long_t gts_double_allocate;
46610- atomic_long_t assign_context;
46611- atomic_long_t assign_context_failed;
46612- atomic_long_t free_context;
46613- atomic_long_t load_user_context;
46614- atomic_long_t load_kernel_context;
46615- atomic_long_t lock_kernel_context;
46616- atomic_long_t unlock_kernel_context;
46617- atomic_long_t steal_user_context;
46618- atomic_long_t steal_kernel_context;
46619- atomic_long_t steal_context_failed;
46620- atomic_long_t nopfn;
46621- atomic_long_t asid_new;
46622- atomic_long_t asid_next;
46623- atomic_long_t asid_wrap;
46624- atomic_long_t asid_reuse;
46625- atomic_long_t intr;
46626- atomic_long_t intr_cbr;
46627- atomic_long_t intr_tfh;
46628- atomic_long_t intr_spurious;
46629- atomic_long_t intr_mm_lock_failed;
46630- atomic_long_t call_os;
46631- atomic_long_t call_os_wait_queue;
46632- atomic_long_t user_flush_tlb;
46633- atomic_long_t user_unload_context;
46634- atomic_long_t user_exception;
46635- atomic_long_t set_context_option;
46636- atomic_long_t check_context_retarget_intr;
46637- atomic_long_t check_context_unload;
46638- atomic_long_t tlb_dropin;
46639- atomic_long_t tlb_preload_page;
46640- atomic_long_t tlb_dropin_fail_no_asid;
46641- atomic_long_t tlb_dropin_fail_upm;
46642- atomic_long_t tlb_dropin_fail_invalid;
46643- atomic_long_t tlb_dropin_fail_range_active;
46644- atomic_long_t tlb_dropin_fail_idle;
46645- atomic_long_t tlb_dropin_fail_fmm;
46646- atomic_long_t tlb_dropin_fail_no_exception;
46647- atomic_long_t tfh_stale_on_fault;
46648- atomic_long_t mmu_invalidate_range;
46649- atomic_long_t mmu_invalidate_page;
46650- atomic_long_t flush_tlb;
46651- atomic_long_t flush_tlb_gru;
46652- atomic_long_t flush_tlb_gru_tgh;
46653- atomic_long_t flush_tlb_gru_zero_asid;
46654+ atomic_long_unchecked_t vdata_alloc;
46655+ atomic_long_unchecked_t vdata_free;
46656+ atomic_long_unchecked_t gts_alloc;
46657+ atomic_long_unchecked_t gts_free;
46658+ atomic_long_unchecked_t gms_alloc;
46659+ atomic_long_unchecked_t gms_free;
46660+ atomic_long_unchecked_t gts_double_allocate;
46661+ atomic_long_unchecked_t assign_context;
46662+ atomic_long_unchecked_t assign_context_failed;
46663+ atomic_long_unchecked_t free_context;
46664+ atomic_long_unchecked_t load_user_context;
46665+ atomic_long_unchecked_t load_kernel_context;
46666+ atomic_long_unchecked_t lock_kernel_context;
46667+ atomic_long_unchecked_t unlock_kernel_context;
46668+ atomic_long_unchecked_t steal_user_context;
46669+ atomic_long_unchecked_t steal_kernel_context;
46670+ atomic_long_unchecked_t steal_context_failed;
46671+ atomic_long_unchecked_t nopfn;
46672+ atomic_long_unchecked_t asid_new;
46673+ atomic_long_unchecked_t asid_next;
46674+ atomic_long_unchecked_t asid_wrap;
46675+ atomic_long_unchecked_t asid_reuse;
46676+ atomic_long_unchecked_t intr;
46677+ atomic_long_unchecked_t intr_cbr;
46678+ atomic_long_unchecked_t intr_tfh;
46679+ atomic_long_unchecked_t intr_spurious;
46680+ atomic_long_unchecked_t intr_mm_lock_failed;
46681+ atomic_long_unchecked_t call_os;
46682+ atomic_long_unchecked_t call_os_wait_queue;
46683+ atomic_long_unchecked_t user_flush_tlb;
46684+ atomic_long_unchecked_t user_unload_context;
46685+ atomic_long_unchecked_t user_exception;
46686+ atomic_long_unchecked_t set_context_option;
46687+ atomic_long_unchecked_t check_context_retarget_intr;
46688+ atomic_long_unchecked_t check_context_unload;
46689+ atomic_long_unchecked_t tlb_dropin;
46690+ atomic_long_unchecked_t tlb_preload_page;
46691+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46692+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46693+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46694+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46695+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46696+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46697+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46698+ atomic_long_unchecked_t tfh_stale_on_fault;
46699+ atomic_long_unchecked_t mmu_invalidate_range;
46700+ atomic_long_unchecked_t mmu_invalidate_page;
46701+ atomic_long_unchecked_t flush_tlb;
46702+ atomic_long_unchecked_t flush_tlb_gru;
46703+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46704+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46705
46706- atomic_long_t copy_gpa;
46707- atomic_long_t read_gpa;
46708+ atomic_long_unchecked_t copy_gpa;
46709+ atomic_long_unchecked_t read_gpa;
46710
46711- atomic_long_t mesq_receive;
46712- atomic_long_t mesq_receive_none;
46713- atomic_long_t mesq_send;
46714- atomic_long_t mesq_send_failed;
46715- atomic_long_t mesq_noop;
46716- atomic_long_t mesq_send_unexpected_error;
46717- atomic_long_t mesq_send_lb_overflow;
46718- atomic_long_t mesq_send_qlimit_reached;
46719- atomic_long_t mesq_send_amo_nacked;
46720- atomic_long_t mesq_send_put_nacked;
46721- atomic_long_t mesq_page_overflow;
46722- atomic_long_t mesq_qf_locked;
46723- atomic_long_t mesq_qf_noop_not_full;
46724- atomic_long_t mesq_qf_switch_head_failed;
46725- atomic_long_t mesq_qf_unexpected_error;
46726- atomic_long_t mesq_noop_unexpected_error;
46727- atomic_long_t mesq_noop_lb_overflow;
46728- atomic_long_t mesq_noop_qlimit_reached;
46729- atomic_long_t mesq_noop_amo_nacked;
46730- atomic_long_t mesq_noop_put_nacked;
46731- atomic_long_t mesq_noop_page_overflow;
46732+ atomic_long_unchecked_t mesq_receive;
46733+ atomic_long_unchecked_t mesq_receive_none;
46734+ atomic_long_unchecked_t mesq_send;
46735+ atomic_long_unchecked_t mesq_send_failed;
46736+ atomic_long_unchecked_t mesq_noop;
46737+ atomic_long_unchecked_t mesq_send_unexpected_error;
46738+ atomic_long_unchecked_t mesq_send_lb_overflow;
46739+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46740+ atomic_long_unchecked_t mesq_send_amo_nacked;
46741+ atomic_long_unchecked_t mesq_send_put_nacked;
46742+ atomic_long_unchecked_t mesq_page_overflow;
46743+ atomic_long_unchecked_t mesq_qf_locked;
46744+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46745+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46746+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46747+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46748+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46749+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46750+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46751+ atomic_long_unchecked_t mesq_noop_put_nacked;
46752+ atomic_long_unchecked_t mesq_noop_page_overflow;
46753
46754 };
46755
46756@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46757 tghop_invalidate, mcsop_last};
46758
46759 struct mcs_op_statistic {
46760- atomic_long_t count;
46761- atomic_long_t total;
46762+ atomic_long_unchecked_t count;
46763+ atomic_long_unchecked_t total;
46764 unsigned long max;
46765 };
46766
46767@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46768
46769 #define STAT(id) do { \
46770 if (gru_options & OPT_STATS) \
46771- atomic_long_inc(&gru_stats.id); \
46772+ atomic_long_inc_unchecked(&gru_stats.id); \
46773 } while (0)
46774
46775 #ifdef CONFIG_SGI_GRU_DEBUG
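The entire gru_stats_s structure is statistics: the counters only feed the gruprocfs output above, so wrapping is harmless and the _unchecked atomic family (exempt from PaX overflow detection) is appropriate, as the STAT() macro change confirms. A C11 sketch of the counter pattern:

/* Statistics counters may wrap without harm; C11 atomics stand in
 * for the kernel's atomic_long_unchecked_t here. */
#include <stdatomic.h>
#include <stdio.h>

struct gru_stats_like {
    atomic_long intr;
    atomic_long call_os;
};

static struct gru_stats_like gru_stats;

#define STAT(id) atomic_fetch_add(&gru_stats.id, 1)

int main(void)
{
    STAT(intr);
    STAT(intr);
    STAT(call_os);
    printf("intr=%ld call_os=%ld\n",
           (long)atomic_load(&gru_stats.intr),
           (long)atomic_load(&gru_stats.call_os));
    return 0;
}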
46776diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46777index c862cd4..0d176fe 100644
46778--- a/drivers/misc/sgi-xp/xp.h
46779+++ b/drivers/misc/sgi-xp/xp.h
46780@@ -288,7 +288,7 @@ struct xpc_interface {
46781 xpc_notify_func, void *);
46782 void (*received) (short, int, void *);
46783 enum xp_retval (*partid_to_nasids) (short, void *);
46784-};
46785+} __no_const;
46786
46787 extern struct xpc_interface xpc_interface;
46788
46789diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46790index 01be66d..e3a0c7e 100644
46791--- a/drivers/misc/sgi-xp/xp_main.c
46792+++ b/drivers/misc/sgi-xp/xp_main.c
46793@@ -78,13 +78,13 @@ xpc_notloaded(void)
46794 }
46795
46796 struct xpc_interface xpc_interface = {
46797- (void (*)(int))xpc_notloaded,
46798- (void (*)(int))xpc_notloaded,
46799- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46800- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46801+ .connect = (void (*)(int))xpc_notloaded,
46802+ .disconnect = (void (*)(int))xpc_notloaded,
46803+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46804+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46805 void *))xpc_notloaded,
46806- (void (*)(short, int, void *))xpc_notloaded,
46807- (enum xp_retval(*)(short, void *))xpc_notloaded
46808+ .received = (void (*)(short, int, void *))xpc_notloaded,
46809+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46810 };
46811 EXPORT_SYMBOL_GPL(xpc_interface);
46812
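Rewriting xpc_interface from positional to designated initializers keeps each xpc_notloaded stub attached to the right member even if the struct is ever reordered, and makes the table legible to both readers and the constification pass. A short sketch:

/* Named members instead of positional slots: reordering the struct
 * can no longer silently misassign the hooks. */
#include <stdio.h>

struct xpc_interface_like {
    void (*connect)(int);
    void (*disconnect)(int);
};

static void not_loaded(int ch) { printf("xpc not loaded (ch %d)\n", ch); }

static struct xpc_interface_like xpc_interface = {
    .connect    = not_loaded,
    .disconnect = not_loaded,
};

int main(void)
{
    xpc_interface.connect(0);
    return 0;
}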
46813diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46814index b94d5f7..7f494c5 100644
46815--- a/drivers/misc/sgi-xp/xpc.h
46816+++ b/drivers/misc/sgi-xp/xpc.h
46817@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46818 void (*received_payload) (struct xpc_channel *, void *);
46819 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46820 };
46821+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46822
46823 /* struct xpc_partition act_state values (for XPC HB) */
46824
46825@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46826 /* found in xpc_main.c */
46827 extern struct device *xpc_part;
46828 extern struct device *xpc_chan;
46829-extern struct xpc_arch_operations xpc_arch_ops;
46830+extern xpc_arch_operations_no_const xpc_arch_ops;
46831 extern int xpc_disengage_timelimit;
46832 extern int xpc_disengage_timedout;
46833 extern int xpc_activate_IRQ_rcvd;
46834diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46835index 82dc574..8539ab2 100644
46836--- a/drivers/misc/sgi-xp/xpc_main.c
46837+++ b/drivers/misc/sgi-xp/xpc_main.c
46838@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46839 .notifier_call = xpc_system_die,
46840 };
46841
46842-struct xpc_arch_operations xpc_arch_ops;
46843+xpc_arch_operations_no_const xpc_arch_ops;
46844
46845 /*
46846 * Timer function to enforce the timelimit on the partition disengage.
46847@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46848
46849 if (((die_args->trapnr == X86_TRAP_MF) ||
46850 (die_args->trapnr == X86_TRAP_XF)) &&
46851- !user_mode_vm(die_args->regs))
46852+ !user_mode(die_args->regs))
46853 xpc_die_deactivate();
46854
46855 break;
46856diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46857index 4409d79..d7766d0 100644
46858--- a/drivers/mmc/card/block.c
46859+++ b/drivers/mmc/card/block.c
46860@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46861 if (idata->ic.postsleep_min_us)
46862 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46863
46864- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46865+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46866 err = -EFAULT;
46867 goto cmd_rel_host;
46868 }
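The mmc_blk_ioctl_cmd() change drops a redundant &(...) around an array member: &s->arr and s->arr address the same bytes, but the decayed form has pointer-to-element type, which is what the surrounding copy_to_user() size logic (and the size-overflow instrumentation) expects to see. A demonstration in plain C, with memcpy standing in for copy_to_user:

/* Same address, different type: the decayed array has element-pointer
 * type, the &-form is pointer-to-array. */
#include <stdio.h>
#include <string.h>

struct ic_like { unsigned int response[4]; };

int main(void)
{
    struct ic_like ic;
    unsigned int resp[4] = { 1, 2, 3, 4 };

    /* same address either way... */
    printf("%p %p\n", (void *)&ic.response, (void *)ic.response);

    /* ...but the decayed form matches the element-wise copy below */
    memcpy(ic.response, resp, sizeof(resp));
    printf("%u\n", ic.response[3]);
    return 0;
}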
46869diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46870index 0d0f7a2..45b8d60 100644
46871--- a/drivers/mmc/host/dw_mmc.h
46872+++ b/drivers/mmc/host/dw_mmc.h
46873@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46874 int (*parse_dt)(struct dw_mci *host);
46875 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46876 struct dw_mci_tuning_data *tuning_data);
46877-};
46878+} __do_const;
46879 #endif /* _DW_MMC_H_ */
46880diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46881index 8232e9a..7776006 100644
46882--- a/drivers/mmc/host/mmci.c
46883+++ b/drivers/mmc/host/mmci.c
46884@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46885 mmc->caps |= MMC_CAP_CMD23;
46886
46887 if (variant->busy_detect) {
46888- mmci_ops.card_busy = mmci_card_busy;
46889+ pax_open_kernel();
46890+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46891+ pax_close_kernel();
46892 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46893 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46894 mmc->max_busy_timeout = 0;
46895diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46896index 7c71dcd..74cb746 100644
46897--- a/drivers/mmc/host/omap_hsmmc.c
46898+++ b/drivers/mmc/host/omap_hsmmc.c
46899@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46900
46901 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46902 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46903- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46904+ pax_open_kernel();
46905+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46906+ pax_close_kernel();
46907 }
46908
46909 pm_runtime_enable(host->dev);
46910diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46911index af1f7c0..00d368a 100644
46912--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46913+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46914@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46915 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46916 }
46917
46918- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46919- sdhci_esdhc_ops.platform_execute_tuning =
46920+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46921+ pax_open_kernel();
46922+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46923 esdhc_executing_tuning;
46924+ pax_close_kernel();
46925+ }
46926
46927 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46928 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46929diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46930index c45b893..fba0144 100644
46931--- a/drivers/mmc/host/sdhci-s3c.c
46932+++ b/drivers/mmc/host/sdhci-s3c.c
46933@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46934 * we can use overriding functions instead of default.
46935 */
46936 if (sc->no_divider) {
46937- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46938- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46939- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46940+ pax_open_kernel();
46941+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46942+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46943+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46944+ pax_close_kernel();
46945 }
46946
46947 /* It supports additional host capabilities if needed */
46948diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46949index 423666b..81ff5eb 100644
46950--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46951+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46952@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46953 size_t totlen = 0, thislen;
46954 int ret = 0;
46955 size_t buflen = 0;
46956- static char *buffer;
46957+ char *buffer;
46958
46959 if (!ECCBUF_SIZE) {
46960 /* We should fall back to a general writev implementation.
46961diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46962index b3b7ca1..5dd4634 100644
46963--- a/drivers/mtd/nand/denali.c
46964+++ b/drivers/mtd/nand/denali.c
46965@@ -24,6 +24,7 @@
46966 #include <linux/slab.h>
46967 #include <linux/mtd/mtd.h>
46968 #include <linux/module.h>
46969+#include <linux/slab.h>
46970
46971 #include "denali.h"
46972
46973diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46974index 4f3851a..f477a23 100644
46975--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46976+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46977@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46978
46979 /* first try to map the upper buffer directly */
46980 if (virt_addr_valid(this->upper_buf) &&
46981- !object_is_on_stack(this->upper_buf)) {
46982+ !object_starts_on_stack(this->upper_buf)) {
46983 sg_init_one(sgl, this->upper_buf, this->upper_len);
46984 ret = dma_map_sg(this->dev, sgl, 1, dr);
46985 if (ret == 0)
46986diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46987index 51b9d6a..52af9a7 100644
46988--- a/drivers/mtd/nftlmount.c
46989+++ b/drivers/mtd/nftlmount.c
46990@@ -24,6 +24,7 @@
46991 #include <asm/errno.h>
46992 #include <linux/delay.h>
46993 #include <linux/slab.h>
46994+#include <linux/sched.h>
46995 #include <linux/mtd/mtd.h>
46996 #include <linux/mtd/nand.h>
46997 #include <linux/mtd/nftl.h>
46998diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46999index c23184a..4115c41 100644
47000--- a/drivers/mtd/sm_ftl.c
47001+++ b/drivers/mtd/sm_ftl.c
47002@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47003 #define SM_CIS_VENDOR_OFFSET 0x59
47004 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47005 {
47006- struct attribute_group *attr_group;
47007+ attribute_group_no_const *attr_group;
47008 struct attribute **attributes;
47009 struct sm_sysfs_attribute *vendor_attribute;
47010 char *vendor;
47011diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47012index 7b11243..b3278a3 100644
47013--- a/drivers/net/bonding/bond_netlink.c
47014+++ b/drivers/net/bonding/bond_netlink.c
47015@@ -585,7 +585,7 @@ nla_put_failure:
47016 return -EMSGSIZE;
47017 }
47018
47019-struct rtnl_link_ops bond_link_ops __read_mostly = {
47020+struct rtnl_link_ops bond_link_ops = {
47021 .kind = "bond",
47022 .priv_size = sizeof(struct bonding),
47023 .setup = bond_setup,
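Several rtnl_link_ops definitions in this patch lose their __read_mostly annotation: that attribute pins the object into the writable .data..read_mostly section, which conflicts with the constification plugin's goal of placing never-written ops structures in read-only memory. A sketch of the section-placement difference (the attribute spelling below is illustrative):

/* __read_mostly forces a writable section; dropping it (and making
 * the object const) lets the linker place it in .rodata. */
#include <stdio.h>

#define __read_mostly __attribute__((section(".data.read_mostly")))

struct link_ops_like { const char *kind; };

/* before: writable section */
static struct link_ops_like bond_ops_rw __read_mostly = { "bond" };

/* after: const, eligible for .rodata */
static const struct link_ops_like bond_ops_ro = { "bond" };

int main(void)
{
    printf("%s %s\n", bond_ops_rw.kind, bond_ops_ro.kind);
    return 0;
}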
47024diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47025index b3b922a..80bba38 100644
47026--- a/drivers/net/caif/caif_hsi.c
47027+++ b/drivers/net/caif/caif_hsi.c
47028@@ -1444,7 +1444,7 @@ err:
47029 return -ENODEV;
47030 }
47031
47032-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47033+static struct rtnl_link_ops caif_hsi_link_ops = {
47034 .kind = "cfhsi",
47035 .priv_size = sizeof(struct cfhsi),
47036 .setup = cfhsi_setup,
47037diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47038index 98d73aa..63ef9da 100644
47039--- a/drivers/net/can/Kconfig
47040+++ b/drivers/net/can/Kconfig
47041@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47042
47043 config CAN_FLEXCAN
47044 tristate "Support for Freescale FLEXCAN based chips"
47045- depends on ARM || PPC
47046+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47047 ---help---
47048 Say Y here if you want to support for Freescale FlexCAN.
47049
47050diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47051index 62ca0e8..3bed607 100644
47052--- a/drivers/net/can/dev.c
47053+++ b/drivers/net/can/dev.c
47054@@ -958,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47055 return -EOPNOTSUPP;
47056 }
47057
47058-static struct rtnl_link_ops can_link_ops __read_mostly = {
47059+static struct rtnl_link_ops can_link_ops = {
47060 .kind = "can",
47061 .maxtype = IFLA_CAN_MAX,
47062 .policy = can_policy,
47063diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47064index 674f367..ec3a31f 100644
47065--- a/drivers/net/can/vcan.c
47066+++ b/drivers/net/can/vcan.c
47067@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47068 dev->destructor = free_netdev;
47069 }
47070
47071-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47072+static struct rtnl_link_ops vcan_link_ops = {
47073 .kind = "vcan",
47074 .setup = vcan_setup,
47075 };
47076diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47077index 49adbf1..fff7ff8 100644
47078--- a/drivers/net/dummy.c
47079+++ b/drivers/net/dummy.c
47080@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47081 return 0;
47082 }
47083
47084-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47085+static struct rtnl_link_ops dummy_link_ops = {
47086 .kind = DRV_NAME,
47087 .setup = dummy_setup,
47088 .validate = dummy_validate,
47089diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47090index 0443654..4f0aa18 100644
47091--- a/drivers/net/ethernet/8390/ax88796.c
47092+++ b/drivers/net/ethernet/8390/ax88796.c
47093@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47094 if (ax->plat->reg_offsets)
47095 ei_local->reg_offset = ax->plat->reg_offsets;
47096 else {
47097+ resource_size_t _mem_size = mem_size;
47098+ do_div(_mem_size, 0x18);
47099 ei_local->reg_offset = ax->reg_offsets;
47100 for (ret = 0; ret < 0x18; ret++)
47101- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47102+ ax->reg_offsets[ret] = _mem_size * ret;
47103 }
47104
47105 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
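The ax88796 hunk replaces `mem_size / 0x18` with do_div() because resource_size_t is 64-bit on 32-bit platforms with large physical addressing, and a plain 64-bit division there would emit a libgcc helper call the kernel does not provide. do_div(n, base) divides the u64 lvalue n in place and returns the remainder. Usage sketch:

#include <asm/div64.h>
#include <linux/types.h>

static u64 bytes_per_reg(resource_size_t mem_size)
{
	u64 span = mem_size;	/* do_div() wants a u64 lvalue */
	u32 rem;

	rem = do_div(span, 0x18);	/* span /= 0x18 */
	(void)rem;			/* remainder unused here */
	return span;
}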
47106diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47107index 760c72c..a99728c 100644
47108--- a/drivers/net/ethernet/altera/altera_tse_main.c
47109+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47110@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
47111 return 0;
47112 }
47113
47114-static struct net_device_ops altera_tse_netdev_ops = {
47115+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47116 .ndo_open = tse_open,
47117 .ndo_stop = tse_shutdown,
47118 .ndo_start_xmit = tse_start_xmit,
47119@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47120 ndev->netdev_ops = &altera_tse_netdev_ops;
47121 altera_tse_set_ethtool_ops(ndev);
47122
47123+ pax_open_kernel();
47124 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47125
47126 if (priv->hash_filter)
47127 altera_tse_netdev_ops.ndo_set_rx_mode =
47128 tse_set_rx_mode_hashfilter;
47129+ pax_close_kernel();
47130
47131 /* Scatter/gather IO is not supported,
47132 * so it is turned off
47133diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47134index 29a0927..5a348e24 100644
47135--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47136+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47137@@ -1122,14 +1122,14 @@ do { \
47138 * operations, everything works on mask values.
47139 */
47140 #define XMDIO_READ(_pdata, _mmd, _reg) \
47141- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47142+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47143 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47144
47145 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47146 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47147
47148 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47149- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47150+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47151 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47152
47153 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47154diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47155index 8a50b01..39c1ad0 100644
47156--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47157+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47158@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47159
47160 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47161
47162- pdata->hw_if.config_dcb_tc(pdata);
47163+ pdata->hw_if->config_dcb_tc(pdata);
47164
47165 return 0;
47166 }
47167@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47168
47169 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47170
47171- pdata->hw_if.config_dcb_pfc(pdata);
47172+ pdata->hw_if->config_dcb_pfc(pdata);
47173
47174 return 0;
47175 }
47176diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47177index a50891f..b26fe24 100644
47178--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47179+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47180@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47181
47182 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47183 {
47184- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47185+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47186 struct xgbe_channel *channel;
47187 struct xgbe_ring *ring;
47188 struct xgbe_ring_data *rdata;
47189@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47190
47191 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47192 {
47193- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47194+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47195 struct xgbe_channel *channel;
47196 struct xgbe_ring *ring;
47197 struct xgbe_ring_desc *rdesc;
47198@@ -624,7 +624,7 @@ err_out:
47199 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47200 {
47201 struct xgbe_prv_data *pdata = channel->pdata;
47202- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47203+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47204 struct xgbe_ring *ring = channel->rx_ring;
47205 struct xgbe_ring_data *rdata;
47206 int i;
47207@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47208 DBGPR("<--xgbe_realloc_rx_buffer\n");
47209 }
47210
47211-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47212-{
47213- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47214-
47215- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47216- desc_if->free_ring_resources = xgbe_free_ring_resources;
47217- desc_if->map_tx_skb = xgbe_map_tx_skb;
47218- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47219- desc_if->unmap_rdata = xgbe_unmap_rdata;
47220- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47221- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47222-
47223- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47224-}
47225+const struct xgbe_desc_if default_xgbe_desc_if = {
47226+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47227+ .free_ring_resources = xgbe_free_ring_resources,
47228+ .map_tx_skb = xgbe_map_tx_skb,
47229+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47230+ .unmap_rdata = xgbe_unmap_rdata,
47231+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47232+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47233+};
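The xgbe-desc.c hunk above converts a runtime init routine into a const table: every pointer is known at compile time, so designated initializers let the whole ops structure live in .rodata, where it can never be redirected. The much larger xgbe_hw_if conversion below, the const pointers in xgbe.h, and the vxge fifo_mp_callback change all follow the same recipe. A minimal sketch with hypothetical names:

struct demo_desc_if {
	int  (*alloc_ring)(void);
	void (*free_ring)(void);
};

static int  demo_alloc_ring(void) { return 0; }
static void demo_free_ring(void)  { }

/* before: a writable struct filled in at probe time;
 * after: fixed at compile time, placed in read-only data */
const struct demo_desc_if default_demo_desc_if = {
	.alloc_ring = demo_alloc_ring,
	.free_ring  = demo_free_ring,
};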
47234diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47235index 4c66cd1..1a20aab 100644
47236--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47237+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47238@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47239
47240 static int xgbe_init(struct xgbe_prv_data *pdata)
47241 {
47242- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47243+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47244 int ret;
47245
47246 DBGPR("-->xgbe_init\n");
47247@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47248 return 0;
47249 }
47250
47251-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47252-{
47253- DBGPR("-->xgbe_init_function_ptrs\n");
47254-
47255- hw_if->tx_complete = xgbe_tx_complete;
47256-
47257- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47258- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47259- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47260- hw_if->set_mac_address = xgbe_set_mac_address;
47261-
47262- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47263- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47264-
47265- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47266- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47267- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47268- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47269- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47270-
47271- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47272- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47273-
47274- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47275- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47276- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47277-
47278- hw_if->enable_tx = xgbe_enable_tx;
47279- hw_if->disable_tx = xgbe_disable_tx;
47280- hw_if->enable_rx = xgbe_enable_rx;
47281- hw_if->disable_rx = xgbe_disable_rx;
47282-
47283- hw_if->powerup_tx = xgbe_powerup_tx;
47284- hw_if->powerdown_tx = xgbe_powerdown_tx;
47285- hw_if->powerup_rx = xgbe_powerup_rx;
47286- hw_if->powerdown_rx = xgbe_powerdown_rx;
47287-
47288- hw_if->dev_xmit = xgbe_dev_xmit;
47289- hw_if->dev_read = xgbe_dev_read;
47290- hw_if->enable_int = xgbe_enable_int;
47291- hw_if->disable_int = xgbe_disable_int;
47292- hw_if->init = xgbe_init;
47293- hw_if->exit = xgbe_exit;
47294+const struct xgbe_hw_if default_xgbe_hw_if = {
47295+ .tx_complete = xgbe_tx_complete,
47296+
47297+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47298+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47299+ .add_mac_addresses = xgbe_add_mac_addresses,
47300+ .set_mac_address = xgbe_set_mac_address,
47301+
47302+ .enable_rx_csum = xgbe_enable_rx_csum,
47303+ .disable_rx_csum = xgbe_disable_rx_csum,
47304+
47305+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47306+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47307+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47308+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47309+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47310+
47311+ .read_mmd_regs = xgbe_read_mmd_regs,
47312+ .write_mmd_regs = xgbe_write_mmd_regs,
47313+
47314+ .set_gmii_speed = xgbe_set_gmii_speed,
47315+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47316+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47317+
47318+ .enable_tx = xgbe_enable_tx,
47319+ .disable_tx = xgbe_disable_tx,
47320+ .enable_rx = xgbe_enable_rx,
47321+ .disable_rx = xgbe_disable_rx,
47322+
47323+ .powerup_tx = xgbe_powerup_tx,
47324+ .powerdown_tx = xgbe_powerdown_tx,
47325+ .powerup_rx = xgbe_powerup_rx,
47326+ .powerdown_rx = xgbe_powerdown_rx,
47327+
47328+ .dev_xmit = xgbe_dev_xmit,
47329+ .dev_read = xgbe_dev_read,
47330+ .enable_int = xgbe_enable_int,
47331+ .disable_int = xgbe_disable_int,
47332+ .init = xgbe_init,
47333+ .exit = xgbe_exit,
47334
47335 /* Descriptor related Sequences have to be initialized here */
47336- hw_if->tx_desc_init = xgbe_tx_desc_init;
47337- hw_if->rx_desc_init = xgbe_rx_desc_init;
47338- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47339- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47340- hw_if->is_last_desc = xgbe_is_last_desc;
47341- hw_if->is_context_desc = xgbe_is_context_desc;
47342- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47343+ .tx_desc_init = xgbe_tx_desc_init,
47344+ .rx_desc_init = xgbe_rx_desc_init,
47345+ .tx_desc_reset = xgbe_tx_desc_reset,
47346+ .rx_desc_reset = xgbe_rx_desc_reset,
47347+ .is_last_desc = xgbe_is_last_desc,
47348+ .is_context_desc = xgbe_is_context_desc,
47349+ .tx_start_xmit = xgbe_tx_start_xmit,
47350
47351 /* For FLOW ctrl */
47352- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47353- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47354+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47355+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47356
47357 /* For RX coalescing */
47358- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47359- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47360- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47361- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47362+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47363+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47364+ .usec_to_riwt = xgbe_usec_to_riwt,
47365+ .riwt_to_usec = xgbe_riwt_to_usec,
47366
47367 /* For RX and TX threshold config */
47368- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47369- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47370+ .config_rx_threshold = xgbe_config_rx_threshold,
47371+ .config_tx_threshold = xgbe_config_tx_threshold,
47372
47373 /* For RX and TX Store and Forward Mode config */
47374- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47375- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47376+ .config_rsf_mode = xgbe_config_rsf_mode,
47377+ .config_tsf_mode = xgbe_config_tsf_mode,
47378
47379 /* For TX DMA Operating on Second Frame config */
47380- hw_if->config_osp_mode = xgbe_config_osp_mode;
47381+ .config_osp_mode = xgbe_config_osp_mode,
47382
47383 /* For RX and TX PBL config */
47384- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47385- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47386- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47387- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47388- hw_if->config_pblx8 = xgbe_config_pblx8;
47389+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47390+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47391+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47392+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47393+ .config_pblx8 = xgbe_config_pblx8,
47394
47395 /* For MMC statistics support */
47396- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47397- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47398- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47399+ .tx_mmc_int = xgbe_tx_mmc_int,
47400+ .rx_mmc_int = xgbe_rx_mmc_int,
47401+ .read_mmc_stats = xgbe_read_mmc_stats,
47402
47403 /* For PTP config */
47404- hw_if->config_tstamp = xgbe_config_tstamp;
47405- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47406- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47407- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47408- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47409+ .config_tstamp = xgbe_config_tstamp,
47410+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47411+ .set_tstamp_time = xgbe_set_tstamp_time,
47412+ .get_tstamp_time = xgbe_get_tstamp_time,
47413+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47414
47415 /* For Data Center Bridging config */
47416- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47417- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47418+ .config_dcb_tc = xgbe_config_dcb_tc,
47419+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47420
47421 /* For Receive Side Scaling */
47422- hw_if->enable_rss = xgbe_enable_rss;
47423- hw_if->disable_rss = xgbe_disable_rss;
47424- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47425- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47426-
47427- DBGPR("<--xgbe_init_function_ptrs\n");
47428-}
47429+ .enable_rss = xgbe_enable_rss,
47430+ .disable_rss = xgbe_disable_rss,
47431+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47432+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47433+};
47434diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47435index e5ffb2c..e56d30b 100644
47436--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47437+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47438@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47439 * support, tell it now
47440 */
47441 if (ring->tx.xmit_more)
47442- pdata->hw_if.tx_start_xmit(channel, ring);
47443+ pdata->hw_if->tx_start_xmit(channel, ring);
47444
47445 return NETDEV_TX_BUSY;
47446 }
47447@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47448
47449 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47450 {
47451- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47452+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47453 struct xgbe_channel *channel;
47454 enum xgbe_int int_id;
47455 unsigned int i;
47456@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47457
47458 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47459 {
47460- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47461+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47462 struct xgbe_channel *channel;
47463 enum xgbe_int int_id;
47464 unsigned int i;
47465@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47466 static irqreturn_t xgbe_isr(int irq, void *data)
47467 {
47468 struct xgbe_prv_data *pdata = data;
47469- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47470+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47471 struct xgbe_channel *channel;
47472 unsigned int dma_isr, dma_ch_isr;
47473 unsigned int mac_isr, mac_tssr;
47474@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47475
47476 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47477 {
47478- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47479+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47480
47481 DBGPR("-->xgbe_init_tx_coalesce\n");
47482
47483@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47484
47485 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47486 {
47487- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47488+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47489
47490 DBGPR("-->xgbe_init_rx_coalesce\n");
47491
47492@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47493
47494 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47495 {
47496- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47497+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47498 struct xgbe_channel *channel;
47499 struct xgbe_ring *ring;
47500 struct xgbe_ring_data *rdata;
47501@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47502
47503 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47504 {
47505- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47506+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47507 struct xgbe_channel *channel;
47508 struct xgbe_ring *ring;
47509 struct xgbe_ring_data *rdata;
47510@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47511 static void xgbe_adjust_link(struct net_device *netdev)
47512 {
47513 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47514- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47515+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47516 struct phy_device *phydev = pdata->phydev;
47517 int new_state = 0;
47518
47519@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47520 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47521 {
47522 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47523- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47524+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47525 unsigned long flags;
47526
47527 DBGPR("-->xgbe_powerdown\n");
47528@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47529 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47530 {
47531 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47532- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47533+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47534 unsigned long flags;
47535
47536 DBGPR("-->xgbe_powerup\n");
47537@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47538
47539 static int xgbe_start(struct xgbe_prv_data *pdata)
47540 {
47541- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47542+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47543 struct net_device *netdev = pdata->netdev;
47544
47545 DBGPR("-->xgbe_start\n");
47546@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47547
47548 static void xgbe_stop(struct xgbe_prv_data *pdata)
47549 {
47550- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47551+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47552 struct xgbe_channel *channel;
47553 struct net_device *netdev = pdata->netdev;
47554 struct netdev_queue *txq;
47555@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47556 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47557 {
47558 struct xgbe_channel *channel;
47559- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47560+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47561 unsigned int i;
47562
47563 DBGPR("-->xgbe_restart_dev\n");
47564@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47565 return -ERANGE;
47566 }
47567
47568- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47569+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47570
47571 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47572
47573@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47574 static int xgbe_open(struct net_device *netdev)
47575 {
47576 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47577- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47578- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47579+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47580+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47581 struct xgbe_channel *channel = NULL;
47582 unsigned int i = 0;
47583 int ret;
47584@@ -1400,8 +1400,8 @@ err_phy_init:
47585 static int xgbe_close(struct net_device *netdev)
47586 {
47587 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47588- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47589- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47590+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47591+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47592 struct xgbe_channel *channel;
47593 unsigned int i;
47594
47595@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47596 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47597 {
47598 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47599- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47600- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47601+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47602+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47603 struct xgbe_channel *channel;
47604 struct xgbe_ring *ring;
47605 struct xgbe_packet_data *packet;
47606@@ -1518,7 +1518,7 @@ tx_netdev_return:
47607 static void xgbe_set_rx_mode(struct net_device *netdev)
47608 {
47609 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47610- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47611+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47612 unsigned int pr_mode, am_mode;
47613
47614 DBGPR("-->xgbe_set_rx_mode\n");
47615@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47616 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47617 {
47618 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47619- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47620+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47621 struct sockaddr *saddr = addr;
47622
47623 DBGPR("-->xgbe_set_mac_address\n");
47624@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47625
47626 DBGPR("-->%s\n", __func__);
47627
47628- pdata->hw_if.read_mmc_stats(pdata);
47629+ pdata->hw_if->read_mmc_stats(pdata);
47630
47631 s->rx_packets = pstats->rxframecount_gb;
47632 s->rx_bytes = pstats->rxoctetcount_gb;
47633@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47634 u16 vid)
47635 {
47636 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47637- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47638+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47639
47640 DBGPR("-->%s\n", __func__);
47641
47642@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47643 u16 vid)
47644 {
47645 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47646- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47647+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47648
47649 DBGPR("-->%s\n", __func__);
47650
47651@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47652 netdev_features_t features)
47653 {
47654 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47655- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47656+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47657 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47658 int ret = 0;
47659
47660@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47661 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47662 {
47663 struct xgbe_prv_data *pdata = channel->pdata;
47664- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47665+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47666 struct xgbe_ring *ring = channel->rx_ring;
47667 struct xgbe_ring_data *rdata;
47668
47669@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47670 static int xgbe_tx_poll(struct xgbe_channel *channel)
47671 {
47672 struct xgbe_prv_data *pdata = channel->pdata;
47673- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47674- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47675+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47676+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47677 struct xgbe_ring *ring = channel->tx_ring;
47678 struct xgbe_ring_data *rdata;
47679 struct xgbe_ring_desc *rdesc;
47680@@ -1891,7 +1891,7 @@ unlock:
47681 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47682 {
47683 struct xgbe_prv_data *pdata = channel->pdata;
47684- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47685+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47686 struct xgbe_ring *ring = channel->rx_ring;
47687 struct xgbe_ring_data *rdata;
47688 struct xgbe_packet_data *packet;
47689diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47690index ebf4893..28108c7 100644
47691--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47692+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47693@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47694
47695 DBGPR("-->%s\n", __func__);
47696
47697- pdata->hw_if.read_mmc_stats(pdata);
47698+ pdata->hw_if->read_mmc_stats(pdata);
47699 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47700 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47701 *data++ = *(u64 *)stat;
47702@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47703 struct ethtool_coalesce *ec)
47704 {
47705 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47706- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47707+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47708 unsigned int riwt;
47709
47710 DBGPR("-->xgbe_get_coalesce\n");
47711@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47712 struct ethtool_coalesce *ec)
47713 {
47714 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47715- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47716+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47717 unsigned int rx_frames, rx_riwt, rx_usecs;
47718 unsigned int tx_frames, tx_usecs;
47719
47720diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47721index dbd3850..4e31b38 100644
47722--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47723+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47724@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47725 DBGPR("<--xgbe_default_config\n");
47726 }
47727
47728-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47729-{
47730- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47731- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47732-}
47733-
47734 static int xgbe_probe(struct platform_device *pdev)
47735 {
47736 struct xgbe_prv_data *pdata;
47737@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47738 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47739
47740 /* Set all the function pointers */
47741- xgbe_init_all_fptrs(pdata);
47742- hw_if = &pdata->hw_if;
47743- desc_if = &pdata->desc_if;
47744+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47745+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47746
47747 /* Issue software reset to device */
47748 hw_if->exit(pdata);
47749diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47750index 363b210..b241389 100644
47751--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47752+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47753@@ -126,7 +126,7 @@
47754 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47755 {
47756 struct xgbe_prv_data *pdata = mii->priv;
47757- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47758+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47759 int mmd_data;
47760
47761 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47762@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47763 u16 mmd_val)
47764 {
47765 struct xgbe_prv_data *pdata = mii->priv;
47766- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47767+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47768 int mmd_data = mmd_val;
47769
47770 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47771diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47772index a1bf9d1c..84adcab 100644
47773--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47774+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47775@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47776 tstamp_cc);
47777 u64 nsec;
47778
47779- nsec = pdata->hw_if.get_tstamp_time(pdata);
47780+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47781
47782 return nsec;
47783 }
47784@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47785
47786 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47787
47788- pdata->hw_if.update_tstamp_addend(pdata, addend);
47789+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47790
47791 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47792
47793diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47794index f9ec762..988c969 100644
47795--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47796+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47797@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47798 int dev_irq;
47799 unsigned int per_channel_irq;
47800
47801- struct xgbe_hw_if hw_if;
47802- struct xgbe_desc_if desc_if;
47803+ const struct xgbe_hw_if *hw_if;
47804+ const struct xgbe_desc_if *desc_if;
47805
47806 /* AXI DMA settings */
47807 unsigned int axdomain;
47808@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47809 #endif
47810 };
47811
47812+extern const struct xgbe_hw_if default_xgbe_hw_if;
47813+extern const struct xgbe_desc_if default_xgbe_desc_if;
47814+
47815 /* Function prototypes*/
47816
47817 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47818diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47819index adcacda..fa6e0ae 100644
47820--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47821+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47822@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47823 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47824 {
47825 /* RX_MODE controlling object */
47826- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47827+ bnx2x_init_rx_mode_obj(bp);
47828
47829 /* multicast configuration controlling object */
47830 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47831diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47832index 07cdf9b..b08ecc7 100644
47833--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47834+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47835@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47836 return rc;
47837 }
47838
47839-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47840- struct bnx2x_rx_mode_obj *o)
47841+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47842 {
47843 if (CHIP_IS_E1x(bp)) {
47844- o->wait_comp = bnx2x_empty_rx_mode_wait;
47845- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47846+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47847+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47848 } else {
47849- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47850- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47851+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47852+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47853 }
47854 }
47855
47856diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47857index 86baecb..ff3bb46 100644
47858--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47859+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47860@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47861
47862 /********************* RX MODE ****************/
47863
47864-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47865- struct bnx2x_rx_mode_obj *o);
47866+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47867
47868 /**
47869 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47870diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47871index 31c9f82..e65e986 100644
47872--- a/drivers/net/ethernet/broadcom/tg3.h
47873+++ b/drivers/net/ethernet/broadcom/tg3.h
47874@@ -150,6 +150,7 @@
47875 #define CHIPREV_ID_5750_A0 0x4000
47876 #define CHIPREV_ID_5750_A1 0x4001
47877 #define CHIPREV_ID_5750_A3 0x4003
47878+#define CHIPREV_ID_5750_C1 0x4201
47879 #define CHIPREV_ID_5750_C2 0x4202
47880 #define CHIPREV_ID_5752_A0_HW 0x5000
47881 #define CHIPREV_ID_5752_A0 0x6000
47882diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47883index 903466e..b285864 100644
47884--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47885+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47886@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47887 }
47888
47889 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47890- bna_cb_ioceth_enable,
47891- bna_cb_ioceth_disable,
47892- bna_cb_ioceth_hbfail,
47893- bna_cb_ioceth_reset
47894+ .enable_cbfn = bna_cb_ioceth_enable,
47895+ .disable_cbfn = bna_cb_ioceth_disable,
47896+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47897+ .reset_cbfn = bna_cb_ioceth_reset
47898 };
47899
47900 static void bna_attr_init(struct bna_ioceth *ioceth)
47901diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47902index 8cffcdf..aadf043 100644
47903--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47904+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47905@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47906 */
47907 struct l2t_skb_cb {
47908 arp_failure_handler_func arp_failure_handler;
47909-};
47910+} __no_const;
47911
47912 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47913
47914diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47915index ccf3436..b720d77 100644
47916--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47917+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47918@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47919
47920 int i;
47921 struct adapter *ap = netdev2adap(dev);
47922- static const unsigned int *reg_ranges;
47923+ const unsigned int *reg_ranges;
47924 int arr_size = 0, buf_size = 0;
47925
47926 if (is_t4(ap->params.chip)) {
47927diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47928index badff18..e15c4ec 100644
47929--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47930+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47931@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47932 for (i=0; i<ETH_ALEN; i++) {
47933 tmp.addr[i] = dev->dev_addr[i];
47934 }
47935- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47936+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47937 break;
47938
47939 case DE4X5_SET_HWADDR: /* Set the hardware address */
47940@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47941 spin_lock_irqsave(&lp->lock, flags);
47942 memcpy(&statbuf, &lp->pktStats, ioc->len);
47943 spin_unlock_irqrestore(&lp->lock, flags);
47944- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47945+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47946 return -EFAULT;
47947 break;
47948 }
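Both de4x5 hunks add the same guard: ioc->len comes from userspace, and copying that many bytes out of a fixed-size kernel object (tmp.addr, statbuf) would leak adjacent kernel memory, so the length is checked against the object first. The idiom, sketched:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* clamp a user-controlled length to the kernel object's size
 * before copying out */
static int copy_out_bounded(void __user *dst, const void *src,
			    size_t user_len, size_t obj_size)
{
	if (user_len > obj_size)
		return -EFAULT;
	if (copy_to_user(dst, src, user_len))
		return -EFAULT;
	return 0;
}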
47949diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47950index d48806b..41cd80f 100644
47951--- a/drivers/net/ethernet/emulex/benet/be_main.c
47952+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47953@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47954
47955 if (wrapped)
47956 newacc += 65536;
47957- ACCESS_ONCE(*acc) = newacc;
47958+ ACCESS_ONCE_RW(*acc) = newacc;
47959 }
47960
47961 static void populate_erx_stats(struct be_adapter *adapter,
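ACCESS_ONCE() becomes ACCESS_ONCE_RW() in this be_main hunk and again in the i40e, ixgbe, mlx4 and sfc hunks below. Under PaX the plain macro is const-qualified so it can only be used for reads; stores go through the _RW variant, which keeps the usual volatile-cast semantics. Illustrative definitions, assuming the 3.19-era macro:

/* mainline 3.19 (linux/compiler.h):
 *	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 * PaX makes that read-only and adds a writable twin: */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static unsigned long base_incval;

static void update_incval(unsigned long incval)
{
	/* one untorn store the compiler may not cache or split */
	ACCESS_ONCE_RW(base_incval) = incval;
}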
47962diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47963index 6d0c5d5..55be363 100644
47964--- a/drivers/net/ethernet/faraday/ftgmac100.c
47965+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47966@@ -30,6 +30,8 @@
47967 #include <linux/netdevice.h>
47968 #include <linux/phy.h>
47969 #include <linux/platform_device.h>
47970+#include <linux/interrupt.h>
47971+#include <linux/irqreturn.h>
47972 #include <net/ip.h>
47973
47974 #include "ftgmac100.h"
47975diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47976index dce5f7b..2433466 100644
47977--- a/drivers/net/ethernet/faraday/ftmac100.c
47978+++ b/drivers/net/ethernet/faraday/ftmac100.c
47979@@ -31,6 +31,8 @@
47980 #include <linux/module.h>
47981 #include <linux/netdevice.h>
47982 #include <linux/platform_device.h>
47983+#include <linux/interrupt.h>
47984+#include <linux/irqreturn.h>
47985
47986 #include "ftmac100.h"
47987
47988diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47989index 6d1ec92..4d5d97d 100644
47990--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47991+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47992@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
47993 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
47994
47995 /* Update the base adjustement value. */
47996- ACCESS_ONCE(pf->ptp_base_adj) = incval;
47997+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
47998 smp_mb(); /* Force the above update. */
47999 }
48000
48001diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48002index 5fd4b52..87aa34b 100644
48003--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48004+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48005@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48006 }
48007
48008 /* update the base incval used to calculate frequency adjustment */
48009- ACCESS_ONCE(adapter->base_incval) = incval;
48010+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48011 smp_mb();
48012
48013 /* need lock to prevent incorrect read while modifying cyclecounter */
48014diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48015index e3357bf..d4d5348 100644
48016--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48017+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48018@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48019 wmb();
48020
48021 /* we want to dirty this cache line once */
48022- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48023- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48024+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48025+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48026
48027 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48028
48029diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48030index 2bbd01f..e8baa64 100644
48031--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48032+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48033@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48034 struct __vxge_hw_fifo *fifo;
48035 struct vxge_hw_fifo_config *config;
48036 u32 txdl_size, txdl_per_memblock;
48037- struct vxge_hw_mempool_cbs fifo_mp_callback;
48038+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48039+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48040+ };
48041+
48042 struct __vxge_hw_virtualpath *vpath;
48043
48044 if ((vp == NULL) || (attr == NULL)) {
48045@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48046 goto exit;
48047 }
48048
48049- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48050-
48051 fifo->mempool =
48052 __vxge_hw_mempool_create(vpath->hldev,
48053 fifo->config->memblock_size,
48054diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48055index 2bb48d5..d1a865d 100644
48056--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48057+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48058@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48059 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48060 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48061 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48062- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48063+ pax_open_kernel();
48064+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48065+ pax_close_kernel();
48066 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48067 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48068 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48069diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48070index be7d7a6..a8983f8 100644
48071--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48072+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48073@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48074 case QLCNIC_NON_PRIV_FUNC:
48075 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48076 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48077- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48078+ pax_open_kernel();
48079+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48080+ pax_close_kernel();
48081 break;
48082 case QLCNIC_PRIV_FUNC:
48083 ahw->op_mode = QLCNIC_PRIV_FUNC;
48084 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48085- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48086+ pax_open_kernel();
48087+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48088+ pax_close_kernel();
48089 break;
48090 case QLCNIC_MGMT_FUNC:
48091 ahw->op_mode = QLCNIC_MGMT_FUNC;
48092 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48093- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48094+ pax_open_kernel();
48095+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48096+ pax_close_kernel();
48097 break;
48098 default:
48099 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48100diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48101index c9f57fb..208bdc1 100644
48102--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48103+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48104@@ -1285,7 +1285,7 @@ flash_temp:
48105 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48106 {
48107 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48108- static const struct qlcnic_dump_operations *fw_dump_ops;
48109+ const struct qlcnic_dump_operations *fw_dump_ops;
48110 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48111 u32 entry_offset, dump, no_entries, buf_offset = 0;
48112 int i, k, ops_cnt, ops_index, dump_size = 0;
48113diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48114index 2e2cf80..ebc796d 100644
48115--- a/drivers/net/ethernet/realtek/r8169.c
48116+++ b/drivers/net/ethernet/realtek/r8169.c
48117@@ -788,22 +788,22 @@ struct rtl8169_private {
48118 struct mdio_ops {
48119 void (*write)(struct rtl8169_private *, int, int);
48120 int (*read)(struct rtl8169_private *, int);
48121- } mdio_ops;
48122+ } __no_const mdio_ops;
48123
48124 struct pll_power_ops {
48125 void (*down)(struct rtl8169_private *);
48126 void (*up)(struct rtl8169_private *);
48127- } pll_power_ops;
48128+ } __no_const pll_power_ops;
48129
48130 struct jumbo_ops {
48131 void (*enable)(struct rtl8169_private *);
48132 void (*disable)(struct rtl8169_private *);
48133- } jumbo_ops;
48134+ } __no_const jumbo_ops;
48135
48136 struct csi_ops {
48137 void (*write)(struct rtl8169_private *, int, int);
48138 u32 (*read)(struct rtl8169_private *, int);
48139- } csi_ops;
48140+ } __no_const csi_ops;
48141
48142 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48143 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48144diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48145index 6b861e3..204ac86 100644
48146--- a/drivers/net/ethernet/sfc/ptp.c
48147+++ b/drivers/net/ethernet/sfc/ptp.c
48148@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48149 ptp->start.dma_addr);
48150
48151 /* Clear flag that signals MC ready */
48152- ACCESS_ONCE(*start) = 0;
48153+ ACCESS_ONCE_RW(*start) = 0;
48154 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48155 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48156 EFX_BUG_ON_PARANOID(rc);
48157diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48158index 08c483b..2c4a553 100644
48159--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48160+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48161@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48162
48163 writel(value, ioaddr + MMC_CNTRL);
48164
48165- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48166- MMC_CNTRL, value);
48167+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48168+// MMC_CNTRL, value);
48169 }
48170
48171 /* To mask all all interrupts.*/
48172diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48173index 384ca4f..dd7d4f9 100644
48174--- a/drivers/net/hyperv/hyperv_net.h
48175+++ b/drivers/net/hyperv/hyperv_net.h
48176@@ -171,7 +171,7 @@ struct rndis_device {
48177 enum rndis_device_state state;
48178 bool link_state;
48179 bool link_change;
48180- atomic_t new_req_id;
48181+ atomic_unchecked_t new_req_id;
48182
48183 spinlock_t request_lock;
48184 struct list_head req_list;
48185diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48186index ec0c40a..c9e42eb 100644
48187--- a/drivers/net/hyperv/rndis_filter.c
48188+++ b/drivers/net/hyperv/rndis_filter.c
48189@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48190 * template
48191 */
48192 set = &rndis_msg->msg.set_req;
48193- set->req_id = atomic_inc_return(&dev->new_req_id);
48194+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48195
48196 /* Add to the request list */
48197 spin_lock_irqsave(&dev->request_lock, flags);
48198@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48199
48200 /* Setup the rndis set */
48201 halt = &request->request_msg.msg.halt_req;
48202- halt->req_id = atomic_inc_return(&dev->new_req_id);
48203+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48204
48205 /* Ignore return since this msg is optional. */
48206 rndis_filter_send_request(dev, request);
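The hyperv pair of hunks moves new_req_id to atomic_unchecked_t: PaX REFCOUNT traps atomic_t overflow to catch reference-count bugs, but a request-ID generator is supposed to wrap, so it opts out through the _unchecked API. A sketch, with illustrative aliases for a non-PaX build:

#include <linux/atomic.h>

#ifndef CONFIG_PAX_REFCOUNT
/* without PaX the unchecked API is just the plain one */
#define atomic_unchecked_t		atomic_t
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif

static atomic_unchecked_t req_id = ATOMIC_INIT(0);

static u32 next_req_id(void)
{
	/* IDs wrap harmlessly; unchecked ops skip the overflow trap */
	return (u32)atomic_inc_return_unchecked(&req_id);
}

The same substitution backs the atomic_long_inc_unchecked() call on rx_dropped in the macvlan hunk below.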
48207diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48208index 34f846b..4a0d5b1 100644
48209--- a/drivers/net/ifb.c
48210+++ b/drivers/net/ifb.c
48211@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48212 return 0;
48213 }
48214
48215-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48216+static struct rtnl_link_ops ifb_link_ops = {
48217 .kind = "ifb",
48218 .priv_size = sizeof(struct ifb_private),
48219 .setup = ifb_setup,
48220diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48221index 612e073..a9f5eda 100644
48222--- a/drivers/net/macvlan.c
48223+++ b/drivers/net/macvlan.c
48224@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48225 free_nskb:
48226 kfree_skb(nskb);
48227 err:
48228- atomic_long_inc(&skb->dev->rx_dropped);
48229+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48230 }
48231
48232 static void macvlan_flush_sources(struct macvlan_port *port,
48233@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48234 int macvlan_link_register(struct rtnl_link_ops *ops)
48235 {
48236 /* common fields */
48237- ops->priv_size = sizeof(struct macvlan_dev);
48238- ops->validate = macvlan_validate;
48239- ops->maxtype = IFLA_MACVLAN_MAX;
48240- ops->policy = macvlan_policy;
48241- ops->changelink = macvlan_changelink;
48242- ops->get_size = macvlan_get_size;
48243- ops->fill_info = macvlan_fill_info;
48244+ pax_open_kernel();
48245+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48246+ *(void **)&ops->validate = macvlan_validate;
48247+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48248+ *(const void **)&ops->policy = macvlan_policy;
48249+ *(void **)&ops->changelink = macvlan_changelink;
48250+ *(void **)&ops->get_size = macvlan_get_size;
48251+ *(void **)&ops->fill_info = macvlan_fill_info;
48252+ pax_close_kernel();
48253
48254 return rtnl_link_register(ops);
48255 };
48256@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48257 return NOTIFY_DONE;
48258 }
48259
48260-static struct notifier_block macvlan_notifier_block __read_mostly = {
48261+static struct notifier_block macvlan_notifier_block = {
48262 .notifier_call = macvlan_device_event,
48263 };
48264
48265diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48266index 4d050ee..012f6dd 100644
48267--- a/drivers/net/macvtap.c
48268+++ b/drivers/net/macvtap.c
48269@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48270 dev->tx_queue_len = TUN_READQ_SIZE;
48271 }
48272
48273-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48274+static struct rtnl_link_ops macvtap_link_ops = {
48275 .kind = "macvtap",
48276 .setup = macvtap_setup,
48277 .newlink = macvtap_newlink,
48278@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48279
48280 ret = 0;
48281 u = q->flags;
48282- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48283+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48284 put_user(u, &ifr->ifr_flags))
48285 ret = -EFAULT;
48286 macvtap_put_vlan(vlan);
48287@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48288 return NOTIFY_DONE;
48289 }
48290
48291-static struct notifier_block macvtap_notifier_block __read_mostly = {
48292+static struct notifier_block macvtap_notifier_block = {
48293 .notifier_call = macvtap_device_event,
48294 };
48295
48296diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48297index 34924df..a747360 100644
48298--- a/drivers/net/nlmon.c
48299+++ b/drivers/net/nlmon.c
48300@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48301 return 0;
48302 }
48303
48304-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48305+static struct rtnl_link_ops nlmon_link_ops = {
48306 .kind = "nlmon",
48307 .priv_size = sizeof(struct nlmon),
48308 .setup = nlmon_setup,
48309diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48310index 3fc91e8..6c36337 100644
48311--- a/drivers/net/phy/phy_device.c
48312+++ b/drivers/net/phy/phy_device.c
48313@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48314 * zero on success.
48315 *
48316 */
48317-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48318+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48319 struct phy_c45_device_ids *c45_ids) {
48320 int phy_reg;
48321 int i, reg_addr;
48322@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48323 * its return value is in turn returned.
48324 *
48325 */
48326-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48327+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48328 bool is_c45, struct phy_c45_device_ids *c45_ids)
48329 {
48330 int phy_reg;
48331@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48332 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48333 {
48334 struct phy_c45_device_ids c45_ids = {0};
48335- u32 phy_id = 0;
48336+ int phy_id = 0;
48337 int r;
48338
48339 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48340diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48341index af034db..1611c0b2 100644
48342--- a/drivers/net/ppp/ppp_generic.c
48343+++ b/drivers/net/ppp/ppp_generic.c
48344@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48345 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48346 struct ppp_stats stats;
48347 struct ppp_comp_stats cstats;
48348- char *vers;
48349
48350 switch (cmd) {
48351 case SIOCGPPPSTATS:
48352@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48353 break;
48354
48355 case SIOCGPPPVER:
48356- vers = PPP_VERSION;
48357- if (copy_to_user(addr, vers, strlen(vers) + 1))
48358+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48359 break;
48360 err = 0;
48361 break;
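The ppp_generic hunk drops the intermediate `vers` pointer: for a string literal, sizeof() already counts the trailing NUL, so sizeof(PPP_VERSION) equals strlen(vers) + 1 but is folded at compile time and removes a local. Sketch with a stand-in macro:

#include <linux/uaccess.h>
#include <linux/errno.h>

#define DEMO_VERSION "2.4.2"	/* stands in for PPP_VERSION */

static int copy_version(void __user *addr)
{
	/* sizeof on a string literal = strlen + 1, known at build time */
	if (copy_to_user(addr, DEMO_VERSION, sizeof(DEMO_VERSION)))
		return -EFAULT;
	return 0;
}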
48362diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48363index 079f7ad..b2a2bfa7 100644
48364--- a/drivers/net/slip/slhc.c
48365+++ b/drivers/net/slip/slhc.c
48366@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48367 register struct tcphdr *thp;
48368 register struct iphdr *ip;
48369 register struct cstate *cs;
48370- int len, hdrlen;
48371+ long len, hdrlen;
48372 unsigned char *cp = icp;
48373
48374 /* We've got a compressed packet; read the change byte */
48375diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48376index 2c087ef..4859007 100644
48377--- a/drivers/net/team/team.c
48378+++ b/drivers/net/team/team.c
48379@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
48380 return TEAM_DEFAULT_NUM_RX_QUEUES;
48381 }
48382
48383-static struct rtnl_link_ops team_link_ops __read_mostly = {
48384+static struct rtnl_link_ops team_link_ops = {
48385 .kind = DRV_NAME,
48386 .priv_size = sizeof(struct team),
48387 .setup = team_setup,
48388@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
48389 return NOTIFY_DONE;
48390 }
48391
48392-static struct notifier_block team_notifier_block __read_mostly = {
48393+static struct notifier_block team_notifier_block = {
48394 .notifier_call = team_device_event,
48395 };
48396
48397diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48398index 10f9e40..3515e7e 100644
48399--- a/drivers/net/tun.c
48400+++ b/drivers/net/tun.c
48401@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48402 return -EINVAL;
48403 }
48404
48405-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48406+static struct rtnl_link_ops tun_link_ops = {
48407 .kind = DRV_NAME,
48408 .priv_size = sizeof(struct tun_struct),
48409 .setup = tun_setup,
48410@@ -1827,7 +1827,7 @@ unlock:
48411 }
48412
48413 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48414- unsigned long arg, int ifreq_len)
48415+ unsigned long arg, size_t ifreq_len)
48416 {
48417 struct tun_file *tfile = file->private_data;
48418 struct tun_struct *tun;
48419@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48420 int le;
48421 int ret;
48422
48423+ if (ifreq_len > sizeof ifr)
48424+ return -EFAULT;
48425+
48426 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48427 if (copy_from_user(&ifr, argp, ifreq_len))
48428 return -EFAULT;
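
The tun change above makes ifreq_len unsigned (size_t) and rejects any length larger than the on-stack struct ifreq before copy_from_user() runs, so a miscomputed length from a compat wrapper can never overwrite the stack. A sketch of the guard as a hypothetical helper, not the driver function itself:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int copy_in_bounded(void *dst, size_t dst_len,
			   const void __user *src, size_t src_len)
{
	if (src_len > dst_len)		/* clamp before touching memory */
		return -EFAULT;
	return copy_from_user(dst, src, src_len) ? -EFAULT : 0;
}
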
48429diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48430index 9c5aa92..8cd0405 100644
48431--- a/drivers/net/usb/hso.c
48432+++ b/drivers/net/usb/hso.c
48433@@ -71,7 +71,7 @@
48434 #include <asm/byteorder.h>
48435 #include <linux/serial_core.h>
48436 #include <linux/serial.h>
48437-
48438+#include <asm/local.h>
48439
48440 #define MOD_AUTHOR "Option Wireless"
48441 #define MOD_DESCRIPTION "USB High Speed Option driver"
48442@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48443 struct urb *urb;
48444
48445 urb = serial->rx_urb[0];
48446- if (serial->port.count > 0) {
48447+ if (atomic_read(&serial->port.count) > 0) {
48448 count = put_rxbuf_data(urb, serial);
48449 if (count == -1)
48450 return;
48451@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48452 DUMP1(urb->transfer_buffer, urb->actual_length);
48453
48454 /* Anyone listening? */
48455- if (serial->port.count == 0)
48456+ if (atomic_read(&serial->port.count) == 0)
48457 return;
48458
48459 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48460@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48461 tty_port_tty_set(&serial->port, tty);
48462
48463 /* check for port already opened, if not set the termios */
48464- serial->port.count++;
48465- if (serial->port.count == 1) {
48466+ if (atomic_inc_return(&serial->port.count) == 1) {
48467 serial->rx_state = RX_IDLE;
48468 /* Force default termio settings */
48469 _hso_serial_set_termios(tty, NULL);
48470@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48471 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48472 if (result) {
48473 hso_stop_serial_device(serial->parent);
48474- serial->port.count--;
48475+ atomic_dec(&serial->port.count);
48476 kref_put(&serial->parent->ref, hso_serial_ref_free);
48477 }
48478 } else {
48479@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48480
48481 /* reset the rts and dtr */
48482 /* do the actual close */
48483- serial->port.count--;
48484+ atomic_dec(&serial->port.count);
48485
48486- if (serial->port.count <= 0) {
48487- serial->port.count = 0;
48488+ if (atomic_read(&serial->port.count) <= 0) {
48489+ atomic_set(&serial->port.count, 0);
48490 tty_port_tty_set(&serial->port, NULL);
48491 if (!usb_gone)
48492 hso_stop_serial_device(serial->parent);
48493@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48494
48495 /* the actual setup */
48496 spin_lock_irqsave(&serial->serial_lock, flags);
48497- if (serial->port.count)
48498+ if (atomic_read(&serial->port.count))
48499 _hso_serial_set_termios(tty, old);
48500 else
48501 tty->termios = *old;
48502@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48503 D1("Pending read interrupt on port %d\n", i);
48504 spin_lock(&serial->serial_lock);
48505 if (serial->rx_state == RX_IDLE &&
48506- serial->port.count > 0) {
48507+ atomic_read(&serial->port.count) > 0) {
48508 /* Setup and send a ctrl req read on
48509 * port i */
48510 if (!serial->rx_urb_filled[0]) {
48511@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48512 /* Start all serial ports */
48513 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48514 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48515- if (dev2ser(serial_table[i])->port.count) {
48516+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48517 result =
48518 hso_start_serial_device(serial_table[i], GFP_NOIO);
48519 hso_kick_transmit(dev2ser(serial_table[i]));
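
The hso conversion turns port.count into an atomic_t because it is touched from URB completion callbacks as well as the tty open/close paths; atomic_inc_return() replaces the racy "increment, then test" pair with one atomic step. A reduced sketch of the open/close pairing, assuming an atomic_t field:

#include <linux/atomic.h>

static void port_open(atomic_t *count)
{
	if (atomic_inc_return(count) == 1)
		;	/* first opener: bring the hardware up */
}

static void port_close(atomic_t *count)
{
	atomic_dec(count);
	if (atomic_read(count) <= 0)
		atomic_set(count, 0);	/* clamp, mirroring the driver */
}
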
48520diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48521index bf405f1..fd847ee 100644
48522--- a/drivers/net/usb/r8152.c
48523+++ b/drivers/net/usb/r8152.c
48524@@ -571,7 +571,7 @@ struct r8152 {
48525 void (*unload)(struct r8152 *);
48526 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48527 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48528- } rtl_ops;
48529+ } __no_const rtl_ops;
48530
48531 int intr_interval;
48532 u32 saved_wolopts;
48533diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48534index a2515887..6d13233 100644
48535--- a/drivers/net/usb/sierra_net.c
48536+++ b/drivers/net/usb/sierra_net.c
48537@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48538 /* atomic counter partially included in MAC address to make sure 2 devices
48539 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48540 */
48541-static atomic_t iface_counter = ATOMIC_INIT(0);
48542+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48543
48544 /*
48545 * SYNC Timer Delay definition used to set the expiry time
48546@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48547 dev->net->netdev_ops = &sierra_net_device_ops;
48548
48549 /* change MAC addr to include, ifacenum, and to be unique */
48550- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48551+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48552 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48553
48554 /* we will have to manufacture ethernet headers, prepare template */
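
atomic_unchecked_t is a PaX-only type: with PAX_REFCOUNT enabled, plain atomic_t increments trap on overflow, so counters that are expected to wrap, like this MAC-byte generator, are moved to the _unchecked variant. A sketch that only builds on a PaX/grsecurity kernel:

/* Wrapping is harmless here: only the low 8 bits reach the MAC. */
static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);

static u8 next_mac_byte(void)
{
	return (u8)atomic_inc_return_unchecked(&iface_counter);
}
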
48555diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48556index 0ad6c0c..4013638 100644
48557--- a/drivers/net/virtio_net.c
48558+++ b/drivers/net/virtio_net.c
48559@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48560 #define RECEIVE_AVG_WEIGHT 64
48561
48562 /* Minimum alignment for mergeable packet buffers. */
48563-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48564+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48565
48566 #define VIRTNET_DRIVER_VERSION "1.0.0"
48567
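
The 256UL suffix exists because the kernel's max() macro type-checks its operands; L1_CACHE_BYTES is unsigned long on the affected configs, and mixing it with a bare int 256 draws a warning. A minimal model of that check (the real macro lives in <linux/kernel.h>):

#define max_model(x, y) ({			\
	typeof(x) _mx = (x);			\
	typeof(y) _my = (y);			\
	(void)(&_mx == &_my);	/* warns when operand types differ */	\
	_mx > _my ? _mx : _my; })

static unsigned long pick_align(unsigned long cache_bytes)
{
	return max_model(cache_bytes, 256UL);	/* bare 256 would warn */
}
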
48568diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48569index a8c755d..a988b71 100644
48570--- a/drivers/net/vxlan.c
48571+++ b/drivers/net/vxlan.c
48572@@ -2702,7 +2702,7 @@ nla_put_failure:
48573 return -EMSGSIZE;
48574 }
48575
48576-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48577+static struct rtnl_link_ops vxlan_link_ops = {
48578 .kind = "vxlan",
48579 .maxtype = IFLA_VXLAN_MAX,
48580 .policy = vxlan_policy,
48581@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48582 return NOTIFY_DONE;
48583 }
48584
48585-static struct notifier_block vxlan_notifier_block __read_mostly = {
48586+static struct notifier_block vxlan_notifier_block = {
48587 .notifier_call = vxlan_lowerdev_event,
48588 };
48589
48590diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48591index 5920c99..ff2e4a5 100644
48592--- a/drivers/net/wan/lmc/lmc_media.c
48593+++ b/drivers/net/wan/lmc/lmc_media.c
48594@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48595 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48596
48597 lmc_media_t lmc_ds3_media = {
48598- lmc_ds3_init, /* special media init stuff */
48599- lmc_ds3_default, /* reset to default state */
48600- lmc_ds3_set_status, /* reset status to state provided */
48601- lmc_dummy_set_1, /* set clock source */
48602- lmc_dummy_set2_1, /* set line speed */
48603- lmc_ds3_set_100ft, /* set cable length */
48604- lmc_ds3_set_scram, /* set scrambler */
48605- lmc_ds3_get_link_status, /* get link status */
48606- lmc_dummy_set_1, /* set link status */
48607- lmc_ds3_set_crc_length, /* set CRC length */
48608- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48609- lmc_ds3_watchdog
48610+ .init = lmc_ds3_init, /* special media init stuff */
48611+ .defaults = lmc_ds3_default, /* reset to default state */
48612+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48613+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48614+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48615+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48616+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48617+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48618+ .set_link_status = lmc_dummy_set_1, /* set link status */
48619+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48620+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48621+ .watchdog = lmc_ds3_watchdog
48622 };
48623
48624 lmc_media_t lmc_hssi_media = {
48625- lmc_hssi_init, /* special media init stuff */
48626- lmc_hssi_default, /* reset to default state */
48627- lmc_hssi_set_status, /* reset status to state provided */
48628- lmc_hssi_set_clock, /* set clock source */
48629- lmc_dummy_set2_1, /* set line speed */
48630- lmc_dummy_set_1, /* set cable length */
48631- lmc_dummy_set_1, /* set scrambler */
48632- lmc_hssi_get_link_status, /* get link status */
48633- lmc_hssi_set_link_status, /* set link status */
48634- lmc_hssi_set_crc_length, /* set CRC length */
48635- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48636- lmc_hssi_watchdog
48637+ .init = lmc_hssi_init, /* special media init stuff */
48638+ .defaults = lmc_hssi_default, /* reset to default state */
48639+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48640+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48641+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48642+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48643+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48644+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48645+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48646+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48647+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48648+ .watchdog = lmc_hssi_watchdog
48649 };
48650
48651-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48652- lmc_ssi_default, /* reset to default state */
48653- lmc_ssi_set_status, /* reset status to state provided */
48654- lmc_ssi_set_clock, /* set clock source */
48655- lmc_ssi_set_speed, /* set line speed */
48656- lmc_dummy_set_1, /* set cable length */
48657- lmc_dummy_set_1, /* set scrambler */
48658- lmc_ssi_get_link_status, /* get link status */
48659- lmc_ssi_set_link_status, /* set link status */
48660- lmc_ssi_set_crc_length, /* set CRC length */
48661- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48662- lmc_ssi_watchdog
48663+lmc_media_t lmc_ssi_media = {
48664+ .init = lmc_ssi_init, /* special media init stuff */
48665+ .defaults = lmc_ssi_default, /* reset to default state */
48666+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48667+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48668+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48669+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48670+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48671+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48672+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48673+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48674+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48675+ .watchdog = lmc_ssi_watchdog
48676 };
48677
48678 lmc_media_t lmc_t1_media = {
48679- lmc_t1_init, /* special media init stuff */
48680- lmc_t1_default, /* reset to default state */
48681- lmc_t1_set_status, /* reset status to state provided */
48682- lmc_t1_set_clock, /* set clock source */
48683- lmc_dummy_set2_1, /* set line speed */
48684- lmc_dummy_set_1, /* set cable length */
48685- lmc_dummy_set_1, /* set scrambler */
48686- lmc_t1_get_link_status, /* get link status */
48687- lmc_dummy_set_1, /* set link status */
48688- lmc_t1_set_crc_length, /* set CRC length */
48689- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48690- lmc_t1_watchdog
48691+ .init = lmc_t1_init, /* special media init stuff */
48692+ .defaults = lmc_t1_default, /* reset to default state */
48693+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48694+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48695+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48696+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48697+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48698+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48699+ .set_link_status = lmc_dummy_set_1, /* set link status */
48700+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48701+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48702+ .watchdog = lmc_t1_watchdog
48703 };
48704
48705 static void
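
The lmc_media rewrite converts positional initializers to C99 designated initializers: binding by field name survives struct reordering and is a prerequisite for constifying ops tables. The general shape, with placeholder names:

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

/* Fields bind by name, not position, and the object can be const. */
static const struct ops my_ops = {
	.open  = my_open,
	.close = my_close,
};
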
48706diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48707index feacc3b..5bac0de 100644
48708--- a/drivers/net/wan/z85230.c
48709+++ b/drivers/net/wan/z85230.c
48710@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48711
48712 struct z8530_irqhandler z8530_sync =
48713 {
48714- z8530_rx,
48715- z8530_tx,
48716- z8530_status
48717+ .rx = z8530_rx,
48718+ .tx = z8530_tx,
48719+ .status = z8530_status
48720 };
48721
48722 EXPORT_SYMBOL(z8530_sync);
48723@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48724 }
48725
48726 static struct z8530_irqhandler z8530_dma_sync = {
48727- z8530_dma_rx,
48728- z8530_dma_tx,
48729- z8530_dma_status
48730+ .rx = z8530_dma_rx,
48731+ .tx = z8530_dma_tx,
48732+ .status = z8530_dma_status
48733 };
48734
48735 static struct z8530_irqhandler z8530_txdma_sync = {
48736- z8530_rx,
48737- z8530_dma_tx,
48738- z8530_dma_status
48739+ .rx = z8530_rx,
48740+ .tx = z8530_dma_tx,
48741+ .status = z8530_dma_status
48742 };
48743
48744 /**
48745@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48746
48747 struct z8530_irqhandler z8530_nop=
48748 {
48749- z8530_rx_clear,
48750- z8530_tx_clear,
48751- z8530_status_clear
48752+ .rx = z8530_rx_clear,
48753+ .tx = z8530_tx_clear,
48754+ .status = z8530_status_clear
48755 };
48756
48757
48758diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48759index 0b60295..b8bfa5b 100644
48760--- a/drivers/net/wimax/i2400m/rx.c
48761+++ b/drivers/net/wimax/i2400m/rx.c
48762@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48763 if (i2400m->rx_roq == NULL)
48764 goto error_roq_alloc;
48765
48766- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48767+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48768 GFP_KERNEL);
48769 if (rd == NULL) {
48770 result = -ENOMEM;
48771diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48772index e71a2ce..2268d61 100644
48773--- a/drivers/net/wireless/airo.c
48774+++ b/drivers/net/wireless/airo.c
48775@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48776 struct airo_info *ai = dev->ml_priv;
48777 int ridcode;
48778 int enabled;
48779- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48780+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48781 unsigned char *iobuf;
48782
48783 /* Only super-user can write RIDs */
48784diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48785index da92bfa..5a9001a 100644
48786--- a/drivers/net/wireless/at76c50x-usb.c
48787+++ b/drivers/net/wireless/at76c50x-usb.c
48788@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48789 }
48790
48791 /* Convert timeout from the DFU status to jiffies */
48792-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48793+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48794 {
48795 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48796 | (s->poll_timeout[1] << 8)
48797diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48798index f1946a6..cd367fb 100644
48799--- a/drivers/net/wireless/ath/ath10k/htc.c
48800+++ b/drivers/net/wireless/ath/ath10k/htc.c
48801@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48802 /* registered target arrival callback from the HIF layer */
48803 int ath10k_htc_init(struct ath10k *ar)
48804 {
48805- struct ath10k_hif_cb htc_callbacks;
48806+ static struct ath10k_hif_cb htc_callbacks = {
48807+ .rx_completion = ath10k_htc_rx_completion_handler,
48808+ .tx_completion = ath10k_htc_tx_completion_handler,
48809+ };
48810 struct ath10k_htc_ep *ep = NULL;
48811 struct ath10k_htc *htc = &ar->htc;
48812
48813@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48814 ath10k_htc_reset_endpoint_states(htc);
48815
48816 /* setup HIF layer callbacks */
48817- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48818- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48819 htc->ar = ar;
48820
48821 /* Get HIF default pipe for HTC message exchange */
48822diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48823index 527179c..a890150 100644
48824--- a/drivers/net/wireless/ath/ath10k/htc.h
48825+++ b/drivers/net/wireless/ath/ath10k/htc.h
48826@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48827
48828 struct ath10k_htc_ops {
48829 void (*target_send_suspend_complete)(struct ath10k *ar);
48830-};
48831+} __no_const;
48832
48833 struct ath10k_htc_ep_ops {
48834 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48835 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48836 void (*ep_tx_credits)(struct ath10k *);
48837-};
48838+} __no_const;
48839
48840 /* service connection information */
48841 struct ath10k_htc_svc_conn_req {
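
__no_const is the constify plugin's opt-out: ops structures are made const by default, but these endpoint callbacks are legitimately assigned at run time, so the annotation keeps them writable. On a compiler without the plugin the macro must expand to nothing, roughly:

#ifndef __no_const
#define __no_const	/* no-op without the constify plugin */
#endif

struct ep_ops {
	void (*tx_complete)(void);
} __no_const;	/* stays writable despite auto-constification */
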
48842diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48843index f816909..e56cd8b 100644
48844--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48845+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48846@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48847 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48848 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48849
48850- ACCESS_ONCE(ads->ds_link) = i->link;
48851- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48852+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48853+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48854
48855 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48856 ctl6 = SM(i->keytype, AR_EncrType);
48857@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48858
48859 if ((i->is_first || i->is_last) &&
48860 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48861- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48862+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48863 | set11nTries(i->rates, 1)
48864 | set11nTries(i->rates, 2)
48865 | set11nTries(i->rates, 3)
48866 | (i->dur_update ? AR_DurUpdateEna : 0)
48867 | SM(0, AR_BurstDur);
48868
48869- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48870+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48871 | set11nRate(i->rates, 1)
48872 | set11nRate(i->rates, 2)
48873 | set11nRate(i->rates, 3);
48874 } else {
48875- ACCESS_ONCE(ads->ds_ctl2) = 0;
48876- ACCESS_ONCE(ads->ds_ctl3) = 0;
48877+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48878+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48879 }
48880
48881 if (!i->is_first) {
48882- ACCESS_ONCE(ads->ds_ctl0) = 0;
48883- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48884- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48885+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48886+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48887+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48888 return;
48889 }
48890
48891@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48892 break;
48893 }
48894
48895- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48896+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48897 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48898 | SM(i->txpower[0], AR_XmitPower0)
48899 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48900@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48901 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48902 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48903
48904- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48905- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48906+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48907+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48908
48909 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48910 return;
48911
48912- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48913+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48914 | set11nPktDurRTSCTS(i->rates, 1);
48915
48916- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48917+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48918 | set11nPktDurRTSCTS(i->rates, 3);
48919
48920- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48921+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48922 | set11nRateFlags(i->rates, 1)
48923 | set11nRateFlags(i->rates, 2)
48924 | set11nRateFlags(i->rates, 3)
48925 | SM(i->rtscts_rate, AR_RTSCTSRate);
48926
48927- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48928- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48929- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48930+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48931+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48932+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48933 }
48934
48935 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
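
ACCESS_ONCE_RW is the write-side counterpart PaX introduces: the read form gains a const qualifier so it can no longer appear on the left of an assignment, and every store into the descriptor ring is rewritten to the RW variant. The definitions are roughly:

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))
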
48936diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48937index da84b70..83e4978 100644
48938--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48939+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48940@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48941 (i->qcu << AR_TxQcuNum_S) | desc_len;
48942
48943 checksum += val;
48944- ACCESS_ONCE(ads->info) = val;
48945+ ACCESS_ONCE_RW(ads->info) = val;
48946
48947 checksum += i->link;
48948- ACCESS_ONCE(ads->link) = i->link;
48949+ ACCESS_ONCE_RW(ads->link) = i->link;
48950
48951 checksum += i->buf_addr[0];
48952- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48953+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48954 checksum += i->buf_addr[1];
48955- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48956+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48957 checksum += i->buf_addr[2];
48958- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48959+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48960 checksum += i->buf_addr[3];
48961- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48962+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48963
48964 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48965- ACCESS_ONCE(ads->ctl3) = val;
48966+ ACCESS_ONCE_RW(ads->ctl3) = val;
48967 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48968- ACCESS_ONCE(ads->ctl5) = val;
48969+ ACCESS_ONCE_RW(ads->ctl5) = val;
48970 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48971- ACCESS_ONCE(ads->ctl7) = val;
48972+ ACCESS_ONCE_RW(ads->ctl7) = val;
48973 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48974- ACCESS_ONCE(ads->ctl9) = val;
48975+ ACCESS_ONCE_RW(ads->ctl9) = val;
48976
48977 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48978- ACCESS_ONCE(ads->ctl10) = checksum;
48979+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48980
48981 if (i->is_first || i->is_last) {
48982- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
48983+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
48984 | set11nTries(i->rates, 1)
48985 | set11nTries(i->rates, 2)
48986 | set11nTries(i->rates, 3)
48987 | (i->dur_update ? AR_DurUpdateEna : 0)
48988 | SM(0, AR_BurstDur);
48989
48990- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
48991+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
48992 | set11nRate(i->rates, 1)
48993 | set11nRate(i->rates, 2)
48994 | set11nRate(i->rates, 3);
48995 } else {
48996- ACCESS_ONCE(ads->ctl13) = 0;
48997- ACCESS_ONCE(ads->ctl14) = 0;
48998+ ACCESS_ONCE_RW(ads->ctl13) = 0;
48999+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49000 }
49001
49002 ads->ctl20 = 0;
49003@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49004
49005 ctl17 = SM(i->keytype, AR_EncrType);
49006 if (!i->is_first) {
49007- ACCESS_ONCE(ads->ctl11) = 0;
49008- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49009- ACCESS_ONCE(ads->ctl15) = 0;
49010- ACCESS_ONCE(ads->ctl16) = 0;
49011- ACCESS_ONCE(ads->ctl17) = ctl17;
49012- ACCESS_ONCE(ads->ctl18) = 0;
49013- ACCESS_ONCE(ads->ctl19) = 0;
49014+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49015+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49016+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49017+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49018+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49019+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49020+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49021 return;
49022 }
49023
49024- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49025+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49026 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49027 | SM(i->txpower[0], AR_XmitPower0)
49028 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49029@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49030 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49031 ctl12 |= SM(val, AR_PAPRDChainMask);
49032
49033- ACCESS_ONCE(ads->ctl12) = ctl12;
49034- ACCESS_ONCE(ads->ctl17) = ctl17;
49035+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49036+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49037
49038- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49039+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49040 | set11nPktDurRTSCTS(i->rates, 1);
49041
49042- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49043+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49044 | set11nPktDurRTSCTS(i->rates, 3);
49045
49046- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49047+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49048 | set11nRateFlags(i->rates, 1)
49049 | set11nRateFlags(i->rates, 2)
49050 | set11nRateFlags(i->rates, 3)
49051 | SM(i->rtscts_rate, AR_RTSCTSRate);
49052
49053- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49054+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49055
49056- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49057- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49058- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49059+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49060+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49061+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49062 }
49063
49064 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49065diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49066index 1cbd335..27dfb40 100644
49067--- a/drivers/net/wireless/ath/ath9k/hw.h
49068+++ b/drivers/net/wireless/ath/ath9k/hw.h
49069@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
49070
49071 /* ANI */
49072 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49073-};
49074+} __no_const;
49075
49076 /**
49077 * struct ath_spec_scan - parameters for Atheros spectral scan
49078@@ -716,7 +716,7 @@ struct ath_hw_ops {
49079 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49080 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49081 #endif
49082-};
49083+} __no_const;
49084
49085 struct ath_nf_limits {
49086 s16 max;
49087diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49088index 62b0bf4..4ae094c 100644
49089--- a/drivers/net/wireless/ath/ath9k/main.c
49090+++ b/drivers/net/wireless/ath/ath9k/main.c
49091@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
49092 if (!ath9k_is_chanctx_enabled())
49093 return;
49094
49095- ath9k_ops.hw_scan = ath9k_hw_scan;
49096- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49097- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49098- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49099- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49100- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49101- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49102- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49103- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49104- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49105+ pax_open_kernel();
49106+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49107+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49108+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49109+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49110+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49111+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49112+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49113+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49114+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49115+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49116+ pax_close_kernel();
49117 }
49118
49119 #endif
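
pax_open_kernel()/pax_close_kernel() bracket writes to objects the constify plugin made read-only, such as ath9k_ops; the *(void **)& casts strip the injected const. A rough x86 model of the pair, using hypothetical _model names (the real PaX code is per-arch and also handles preemption and nesting):

static inline unsigned long pax_open_kernel_model(void)
{
	unsigned long cr0;
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~0x10000UL));	/* clear WP */
	return cr0;
}

static inline void pax_close_kernel_model(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0));	/* restore WP */
}
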
49120diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49121index 058a9f2..d5cb1ba 100644
49122--- a/drivers/net/wireless/b43/phy_lp.c
49123+++ b/drivers/net/wireless/b43/phy_lp.c
49124@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49125 {
49126 struct ssb_bus *bus = dev->dev->sdev->bus;
49127
49128- static const struct b206x_channel *chandata = NULL;
49129+ const struct b206x_channel *chandata = NULL;
49130 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49131 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49132 u16 old_comm15, scale;
49133diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49134index dc1d20c..f7a4f06 100644
49135--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49136+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49137@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49138 */
49139 if (il3945_mod_params.disable_hw_scan) {
49140 D_INFO("Disabling hw_scan\n");
49141- il3945_mac_ops.hw_scan = NULL;
49142+ pax_open_kernel();
49143+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49144+ pax_close_kernel();
49145 }
49146
49147 D_INFO("*** LOAD DRIVER ***\n");
49148diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49149index 0ffb6ff..c0b7f0e 100644
49150--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49151+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49152@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49153 {
49154 struct iwl_priv *priv = file->private_data;
49155 char buf[64];
49156- int buf_size;
49157+ size_t buf_size;
49158 u32 offset, len;
49159
49160 memset(buf, 0, sizeof(buf));
49161@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49162 struct iwl_priv *priv = file->private_data;
49163
49164 char buf[8];
49165- int buf_size;
49166+ size_t buf_size;
49167 u32 reset_flag;
49168
49169 memset(buf, 0, sizeof(buf));
49170@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49171 {
49172 struct iwl_priv *priv = file->private_data;
49173 char buf[8];
49174- int buf_size;
49175+ size_t buf_size;
49176 int ht40;
49177
49178 memset(buf, 0, sizeof(buf));
49179@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49180 {
49181 struct iwl_priv *priv = file->private_data;
49182 char buf[8];
49183- int buf_size;
49184+ size_t buf_size;
49185 int value;
49186
49187 memset(buf, 0, sizeof(buf));
49188@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49189 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49190 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49191
49192-static const char *fmt_value = " %-30s %10u\n";
49193-static const char *fmt_hex = " %-30s 0x%02X\n";
49194-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49195-static const char *fmt_header =
49196+static const char fmt_value[] = " %-30s %10u\n";
49197+static const char fmt_hex[] = " %-30s 0x%02X\n";
49198+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49199+static const char fmt_header[] =
49200 "%-32s current cumulative delta max\n";
49201
49202 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49203@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49204 {
49205 struct iwl_priv *priv = file->private_data;
49206 char buf[8];
49207- int buf_size;
49208+ size_t buf_size;
49209 int clear;
49210
49211 memset(buf, 0, sizeof(buf));
49212@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49213 {
49214 struct iwl_priv *priv = file->private_data;
49215 char buf[8];
49216- int buf_size;
49217+ size_t buf_size;
49218 int trace;
49219
49220 memset(buf, 0, sizeof(buf));
49221@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49222 {
49223 struct iwl_priv *priv = file->private_data;
49224 char buf[8];
49225- int buf_size;
49226+ size_t buf_size;
49227 int missed;
49228
49229 memset(buf, 0, sizeof(buf));
49230@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49231
49232 struct iwl_priv *priv = file->private_data;
49233 char buf[8];
49234- int buf_size;
49235+ size_t buf_size;
49236 int plcp;
49237
49238 memset(buf, 0, sizeof(buf));
49239@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49240
49241 struct iwl_priv *priv = file->private_data;
49242 char buf[8];
49243- int buf_size;
49244+ size_t buf_size;
49245 int flush;
49246
49247 memset(buf, 0, sizeof(buf));
49248@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49249
49250 struct iwl_priv *priv = file->private_data;
49251 char buf[8];
49252- int buf_size;
49253+ size_t buf_size;
49254 int rts;
49255
49256 if (!priv->cfg->ht_params)
49257@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49258 {
49259 struct iwl_priv *priv = file->private_data;
49260 char buf[8];
49261- int buf_size;
49262+ size_t buf_size;
49263
49264 memset(buf, 0, sizeof(buf));
49265 buf_size = min(count, sizeof(buf) - 1);
49266@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49267 struct iwl_priv *priv = file->private_data;
49268 u32 event_log_flag;
49269 char buf[8];
49270- int buf_size;
49271+ size_t buf_size;
49272
49273 /* check that the interface is up */
49274 if (!iwl_is_ready(priv))
49275@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49276 struct iwl_priv *priv = file->private_data;
49277 char buf[8];
49278 u32 calib_disabled;
49279- int buf_size;
49280+ size_t buf_size;
49281
49282 memset(buf, 0, sizeof(buf));
49283 buf_size = min(count, sizeof(buf) - 1);
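
Every debugfs write handler in this file clamps the user count with min(count, sizeof(buf) - 1); declaring buf_size as size_t keeps the whole chain unsigned, so the size_overflow plugin never sees a size_t narrowed into a signed int. The clamp pattern in isolation, assuming bufsz >= 1:

static size_t clamp_write(size_t count, char *buf, size_t bufsz)
{
	size_t n = count < bufsz - 1 ? count : bufsz - 1;
	buf[n] = '\0';	/* room guaranteed by the clamp */
	return n;
}
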
49284diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49285index 523fe0c..0d9473b 100644
49286--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49287+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49288@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49289 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49290
49291 char buf[8];
49292- int buf_size;
49293+ size_t buf_size;
49294 u32 reset_flag;
49295
49296 memset(buf, 0, sizeof(buf));
49297@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49298 {
49299 struct iwl_trans *trans = file->private_data;
49300 char buf[8];
49301- int buf_size;
49302+ size_t buf_size;
49303 int csr;
49304
49305 memset(buf, 0, sizeof(buf));
49306diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49307index ef58a88..fafa731 100644
49308--- a/drivers/net/wireless/mac80211_hwsim.c
49309+++ b/drivers/net/wireless/mac80211_hwsim.c
49310@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49311 if (channels < 1)
49312 return -EINVAL;
49313
49314- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49315- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49316- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49317- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49318- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49319- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49320- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49321- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49322- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49323- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49324- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49325- mac80211_hwsim_assign_vif_chanctx;
49326- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49327- mac80211_hwsim_unassign_vif_chanctx;
49328+ pax_open_kernel();
49329+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49330+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49331+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49332+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49333+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49334+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49335+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49336+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49337+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49338+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49339+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49340+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49341+ pax_close_kernel();
49342
49343 spin_lock_init(&hwsim_radio_lock);
49344 INIT_LIST_HEAD(&hwsim_radios);
49345diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49346index 1a4facd..a2ecbbd 100644
49347--- a/drivers/net/wireless/rndis_wlan.c
49348+++ b/drivers/net/wireless/rndis_wlan.c
49349@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49350
49351 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49352
49353- if (rts_threshold < 0 || rts_threshold > 2347)
49354+ if (rts_threshold > 2347)
49355 rts_threshold = 2347;
49356
49357 tmp = cpu_to_le32(rts_threshold);
49358diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49359index 9bb398b..b0cc047 100644
49360--- a/drivers/net/wireless/rt2x00/rt2x00.h
49361+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49362@@ -375,7 +375,7 @@ struct rt2x00_intf {
49363 * for hardware which doesn't support hardware
49364 * sequence counting.
49365 */
49366- atomic_t seqno;
49367+ atomic_unchecked_t seqno;
49368 };
49369
49370 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49371diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49372index 66ff364..3ce34f7 100644
49373--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49374+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49375@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49376 * sequence counter given by mac80211.
49377 */
49378 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49379- seqno = atomic_add_return(0x10, &intf->seqno);
49380+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49381 else
49382- seqno = atomic_read(&intf->seqno);
49383+ seqno = atomic_read_unchecked(&intf->seqno);
49384
49385 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49386 hdr->seq_ctrl |= cpu_to_le16(seqno);
49387diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49388index b661f896..ddf7d2b 100644
49389--- a/drivers/net/wireless/ti/wl1251/sdio.c
49390+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49391@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49392
49393 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49394
49395- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49396- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49397+ pax_open_kernel();
49398+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49399+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49400+ pax_close_kernel();
49401
49402 wl1251_info("using dedicated interrupt line");
49403 } else {
49404- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49405- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49406+ pax_open_kernel();
49407+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49408+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49409+ pax_close_kernel();
49410
49411 wl1251_info("using SDIO interrupt");
49412 }
49413diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49414index d6d0d6d..60c23a0 100644
49415--- a/drivers/net/wireless/ti/wl12xx/main.c
49416+++ b/drivers/net/wireless/ti/wl12xx/main.c
49417@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49418 sizeof(wl->conf.mem));
49419
49420 /* read data preparation is only needed by wl127x */
49421- wl->ops->prepare_read = wl127x_prepare_read;
49422+ pax_open_kernel();
49423+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49424+ pax_close_kernel();
49425
49426 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49427 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49428@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49429 sizeof(wl->conf.mem));
49430
49431 /* read data preparation is only needed by wl127x */
49432- wl->ops->prepare_read = wl127x_prepare_read;
49433+ pax_open_kernel();
49434+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49435+ pax_close_kernel();
49436
49437 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49438 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49439diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49440index 8e56261..9140678 100644
49441--- a/drivers/net/wireless/ti/wl18xx/main.c
49442+++ b/drivers/net/wireless/ti/wl18xx/main.c
49443@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49444 }
49445
49446 if (!checksum_param) {
49447- wl18xx_ops.set_rx_csum = NULL;
49448- wl18xx_ops.init_vif = NULL;
49449+ pax_open_kernel();
49450+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49451+ *(void **)&wl18xx_ops.init_vif = NULL;
49452+ pax_close_kernel();
49453 }
49454
49455 /* Enable 11a Band only if we have 5G antennas */
49456diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49457index a912dc0..a8225ba 100644
49458--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49459+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49460@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49461 {
49462 struct zd_usb *usb = urb->context;
49463 struct zd_usb_interrupt *intr = &usb->intr;
49464- int len;
49465+ unsigned int len;
49466 u16 int_num;
49467
49468 ZD_ASSERT(in_interrupt());
49469diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49470index ce2e2cf..f81e500 100644
49471--- a/drivers/nfc/nfcwilink.c
49472+++ b/drivers/nfc/nfcwilink.c
49473@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49474
49475 static int nfcwilink_probe(struct platform_device *pdev)
49476 {
49477- static struct nfcwilink *drv;
49478+ struct nfcwilink *drv;
49479 int rc;
49480 __u32 protocols;
49481
49482diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49483index f2596c8..50d53af 100644
49484--- a/drivers/nfc/st21nfca/st21nfca.c
49485+++ b/drivers/nfc/st21nfca/st21nfca.c
49486@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49487 goto exit;
49488 }
49489
49490- gate = uid_skb->data;
49491+ memcpy(gate, uid_skb->data, uid_skb->len);
49492 *len = uid_skb->len;
49493 exit:
49494 kfree_skb(uid_skb);
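
The st21nfca change fixes a real pointer-aliasing bug: assigning uid_skb->data to the gate parameter only updates the local copy, and the data it points at is freed by kfree_skb() on the next line; memcpy() into the caller's buffer is what actually exports the UID. The bug class in miniature, with hypothetical helpers:

#include <string.h>

static void get_uid_wrong(unsigned char *out, const unsigned char *src, size_t n)
{
	out = (unsigned char *)src;	/* lost on return; src is freed later */
	(void)n;
}

static void get_uid_right(unsigned char *out, const unsigned char *src, size_t n)
{
	memcpy(out, src, n);		/* data survives in the caller's buffer */
}
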
49495diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49496index 5100742..6ad4e6d 100644
49497--- a/drivers/of/fdt.c
49498+++ b/drivers/of/fdt.c
49499@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49500 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49501 return 0;
49502 }
49503- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49504+ pax_open_kernel();
49505+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49506+ pax_close_kernel();
49507 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49508 }
49509 late_initcall(of_fdt_raw_init);
49510diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49511index d93b2b6..ae50401 100644
49512--- a/drivers/oprofile/buffer_sync.c
49513+++ b/drivers/oprofile/buffer_sync.c
49514@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49515 if (cookie == NO_COOKIE)
49516 offset = pc;
49517 if (cookie == INVALID_COOKIE) {
49518- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49519+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49520 offset = pc;
49521 }
49522 if (cookie != last_cookie) {
49523@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49524 /* add userspace sample */
49525
49526 if (!mm) {
49527- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49528+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49529 return 0;
49530 }
49531
49532 cookie = lookup_dcookie(mm, s->eip, &offset);
49533
49534 if (cookie == INVALID_COOKIE) {
49535- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49536+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49537 return 0;
49538 }
49539
49540@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49541 /* ignore backtraces if failed to add a sample */
49542 if (state == sb_bt_start) {
49543 state = sb_bt_ignore;
49544- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49545+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49546 }
49547 }
49548 release_mm(mm);
49549diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49550index c0cc4e7..44d4e54 100644
49551--- a/drivers/oprofile/event_buffer.c
49552+++ b/drivers/oprofile/event_buffer.c
49553@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49554 }
49555
49556 if (buffer_pos == buffer_size) {
49557- atomic_inc(&oprofile_stats.event_lost_overflow);
49558+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49559 return;
49560 }
49561
49562diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49563index ed2c3ec..deda85a 100644
49564--- a/drivers/oprofile/oprof.c
49565+++ b/drivers/oprofile/oprof.c
49566@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49567 if (oprofile_ops.switch_events())
49568 return;
49569
49570- atomic_inc(&oprofile_stats.multiplex_counter);
49571+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49572 start_switch_worker();
49573 }
49574
49575diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49576index ee2cfce..7f8f699 100644
49577--- a/drivers/oprofile/oprofile_files.c
49578+++ b/drivers/oprofile/oprofile_files.c
49579@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49580
49581 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49582
49583-static ssize_t timeout_read(struct file *file, char __user *buf,
49584+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49585 size_t count, loff_t *offset)
49586 {
49587 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49588diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49589index 59659ce..6c860a0 100644
49590--- a/drivers/oprofile/oprofile_stats.c
49591+++ b/drivers/oprofile/oprofile_stats.c
49592@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49593 cpu_buf->sample_invalid_eip = 0;
49594 }
49595
49596- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49597- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49598- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49599- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49600- atomic_set(&oprofile_stats.multiplex_counter, 0);
49601+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49602+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49603+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49604+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49605+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49606 }
49607
49608
49609diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49610index 1fc622b..8c48fc3 100644
49611--- a/drivers/oprofile/oprofile_stats.h
49612+++ b/drivers/oprofile/oprofile_stats.h
49613@@ -13,11 +13,11 @@
49614 #include <linux/atomic.h>
49615
49616 struct oprofile_stat_struct {
49617- atomic_t sample_lost_no_mm;
49618- atomic_t sample_lost_no_mapping;
49619- atomic_t bt_lost_no_mapping;
49620- atomic_t event_lost_overflow;
49621- atomic_t multiplex_counter;
49622+ atomic_unchecked_t sample_lost_no_mm;
49623+ atomic_unchecked_t sample_lost_no_mapping;
49624+ atomic_unchecked_t bt_lost_no_mapping;
49625+ atomic_unchecked_t event_lost_overflow;
49626+ atomic_unchecked_t multiplex_counter;
49627 };
49628
49629 extern struct oprofile_stat_struct oprofile_stats;
49630diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49631index 3f49345..c750d0b 100644
49632--- a/drivers/oprofile/oprofilefs.c
49633+++ b/drivers/oprofile/oprofilefs.c
49634@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49635
49636 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49637 {
49638- atomic_t *val = file->private_data;
49639- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49640+ atomic_unchecked_t *val = file->private_data;
49641+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49642 }
49643
49644
49645@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49646
49647
49648 int oprofilefs_create_ro_atomic(struct dentry *root,
49649- char const *name, atomic_t *val)
49650+ char const *name, atomic_unchecked_t *val)
49651 {
49652 return __oprofilefs_create_file(root, name,
49653 &atomic_ro_fops, 0444, val);
49654diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49655index bdef916..88c7dee 100644
49656--- a/drivers/oprofile/timer_int.c
49657+++ b/drivers/oprofile/timer_int.c
49658@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49659 return NOTIFY_OK;
49660 }
49661
49662-static struct notifier_block __refdata oprofile_cpu_notifier = {
49663+static struct notifier_block oprofile_cpu_notifier = {
49664 .notifier_call = oprofile_cpu_notify,
49665 };
49666
49667diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49668index 3b47080..6cd05dd 100644
49669--- a/drivers/parport/procfs.c
49670+++ b/drivers/parport/procfs.c
49671@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49672
49673 *ppos += len;
49674
49675- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49676+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49677 }
49678
49679 #ifdef CONFIG_PARPORT_1284
49680@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49681
49682 *ppos += len;
49683
49684- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49685+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49686 }
49687 #endif /* IEEE1284.3 support. */
49688
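
Both parport sysctl handlers copy len bytes out of a fixed on-stack buffer; folding len > sizeof buffer into the -EFAULT condition turns a miscomputed length into a clean error instead of a stack overread. The copy-out twin of the earlier copy-in guard, again as a hypothetical helper:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int copy_out_bounded(void __user *dst, const char *buf,
			    size_t buf_sz, size_t len)
{
	if (len > buf_sz || copy_to_user(dst, buf, len))
		return -EFAULT;
	return 0;
}
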
49689diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49690index 6ca2399..68d866b 100644
49691--- a/drivers/pci/hotplug/acpiphp_ibm.c
49692+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49693@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49694 goto init_cleanup;
49695 }
49696
49697- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49698+ pax_open_kernel();
49699+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49700+ pax_close_kernel();
49701 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49702
49703 return retval;
49704diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49705index 66b7bbe..26bee78 100644
49706--- a/drivers/pci/hotplug/cpcihp_generic.c
49707+++ b/drivers/pci/hotplug/cpcihp_generic.c
49708@@ -73,7 +73,6 @@ static u16 port;
49709 static unsigned int enum_bit;
49710 static u8 enum_mask;
49711
49712-static struct cpci_hp_controller_ops generic_hpc_ops;
49713 static struct cpci_hp_controller generic_hpc;
49714
49715 static int __init validate_parameters(void)
49716@@ -139,6 +138,10 @@ static int query_enum(void)
49717 return ((value & enum_mask) == enum_mask);
49718 }
49719
49720+static struct cpci_hp_controller_ops generic_hpc_ops = {
49721+ .query_enum = query_enum,
49722+};
49723+
49724 static int __init cpcihp_generic_init(void)
49725 {
49726 int status;
49727@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49728 pci_dev_put(dev);
49729
49730 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49731- generic_hpc_ops.query_enum = query_enum;
49732 generic_hpc.ops = &generic_hpc_ops;
49733
49734 status = cpci_hp_register_controller(&generic_hpc);
49735diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49736index 7ecf34e..effed62 100644
49737--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49738+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49739@@ -59,7 +59,6 @@
49740 /* local variables */
49741 static bool debug;
49742 static bool poll;
49743-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49744 static struct cpci_hp_controller zt5550_hpc;
49745
49746 /* Primary cPCI bus bridge device */
49747@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49748 return 0;
49749 }
49750
49751+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49752+ .query_enum = zt5550_hc_query_enum,
49753+};
49754+
49755 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49756 {
49757 int status;
49758@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49759 dbg("returned from zt5550_hc_config");
49760
49761 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49762- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49763 zt5550_hpc.ops = &zt5550_hpc_ops;
49764 if (!poll) {
49765 zt5550_hpc.irq = hc_dev->irq;
49766 zt5550_hpc.irq_flags = IRQF_SHARED;
49767 zt5550_hpc.dev_id = hc_dev;
49768
49769- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49770- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49771- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49772+ pax_open_kernel();
49773+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49774+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49775+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49776+	pax_close_kernel();
49777 } else {
49778 info("using ENUM# polling mode");
49779 }
49780diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49781index 1e08ff8c..3cd145f 100644
49782--- a/drivers/pci/hotplug/cpqphp_nvram.c
49783+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49784@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49785
49786 void compaq_nvram_init (void __iomem *rom_start)
49787 {
49788+#ifndef CONFIG_PAX_KERNEXEC
49789 if (rom_start)
49790 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49791+#endif
49792
49793 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49794
49795diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49796index 56d8486..f26113f 100644
49797--- a/drivers/pci/hotplug/pci_hotplug_core.c
49798+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49799@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49800 return -EINVAL;
49801 }
49802
49803- slot->ops->owner = owner;
49804- slot->ops->mod_name = mod_name;
49805+ pax_open_kernel();
49806+ *(struct module **)&slot->ops->owner = owner;
49807+ *(const char **)&slot->ops->mod_name = mod_name;
49808+ pax_close_kernel();
49809
49810 mutex_lock(&pci_hp_mutex);
49811 /*
49812diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49813index 07aa722..84514b4 100644
49814--- a/drivers/pci/hotplug/pciehp_core.c
49815+++ b/drivers/pci/hotplug/pciehp_core.c
49816@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49817 struct slot *slot = ctrl->slot;
49818 struct hotplug_slot *hotplug = NULL;
49819 struct hotplug_slot_info *info = NULL;
49820- struct hotplug_slot_ops *ops = NULL;
49821+ hotplug_slot_ops_no_const *ops = NULL;
49822 char name[SLOT_NAME_SIZE];
49823 int retval = -ENOMEM;
49824
49825diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49826index fd60806..ab6c565 100644
49827--- a/drivers/pci/msi.c
49828+++ b/drivers/pci/msi.c
49829@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49830 {
49831 struct attribute **msi_attrs;
49832 struct attribute *msi_attr;
49833- struct device_attribute *msi_dev_attr;
49834- struct attribute_group *msi_irq_group;
49835+ device_attribute_no_const *msi_dev_attr;
49836+ attribute_group_no_const *msi_irq_group;
49837 const struct attribute_group **msi_irq_groups;
49838 struct msi_desc *entry;
49839 int ret = -ENOMEM;
49840@@ -573,7 +573,7 @@ error_attrs:
49841 count = 0;
49842 msi_attr = msi_attrs[count];
49843 while (msi_attr) {
49844- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49845+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49846 kfree(msi_attr->name);
49847 kfree(msi_dev_attr);
49848 ++count;
49849diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49850index 312f23a..d21181c 100644
49851--- a/drivers/pci/pci-sysfs.c
49852+++ b/drivers/pci/pci-sysfs.c
49853@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49854 {
49855 /* allocate attribute structure, piggyback attribute name */
49856 int name_len = write_combine ? 13 : 10;
49857- struct bin_attribute *res_attr;
49858+ bin_attribute_no_const *res_attr;
49859 int retval;
49860
49861 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49862@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49863 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49864 {
49865 int retval;
49866- struct bin_attribute *attr;
49867+ bin_attribute_no_const *attr;
49868
49869 /* If the device has VPD, try to expose it in sysfs. */
49870 if (dev->vpd) {
49871@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49872 {
49873 int retval;
49874 int rom_size = 0;
49875- struct bin_attribute *attr;
49876+ bin_attribute_no_const *attr;
49877
49878 if (!sysfs_initialized)
49879 return -EACCES;
49880diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49881index d54632a..198c84d 100644
49882--- a/drivers/pci/pci.h
49883+++ b/drivers/pci/pci.h
49884@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49885 struct pci_vpd {
49886 unsigned int len;
49887 const struct pci_vpd_ops *ops;
49888- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49889+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49890 };
49891
49892 int pci_vpd_pci22_init(struct pci_dev *dev);
49893diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49894index e1e7026..d28dd33 100644
49895--- a/drivers/pci/pcie/aspm.c
49896+++ b/drivers/pci/pcie/aspm.c
49897@@ -27,9 +27,9 @@
49898 #define MODULE_PARAM_PREFIX "pcie_aspm."
49899
49900 /* Note: those are not register definitions */
49901-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49902-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49903-#define ASPM_STATE_L1 (4) /* L1 state */
49904+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49905+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49906+#define ASPM_STATE_L1 (4U) /* L1 state */
49907 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49908 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49909
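
[annotation] The U suffix makes the ASPM_STATE_* masks unsigned from the start; a plain int literal sign-extends when complemented and widened, which changes the value of derived masks. A runnable illustration of the difference:

    #include <stdio.h>

    #define FLAG_S (4)      /* int, as before the patch */
    #define FLAG_U (4U)     /* unsigned int, as after   */

    int main(void)
    {
            unsigned long long s = ~FLAG_S; /* -5 sign-extends: fffffffffffffffb */
            unsigned long long u = ~FLAG_U; /* stays 32-bit:             fffffffb */
            printf("%llx\n%llx\n", s, u);
            return 0;
    }
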
49910diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49911index 23212f8..65e945b 100644
49912--- a/drivers/pci/probe.c
49913+++ b/drivers/pci/probe.c
49914@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49915 u16 orig_cmd;
49916 struct pci_bus_region region, inverted_region;
49917
49918- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49919+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49920
49921 /* No printks while decoding is disabled! */
49922 if (!dev->mmio_always_on) {
49923diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
49924index 3f155e7..0f4b1f0 100644
49925--- a/drivers/pci/proc.c
49926+++ b/drivers/pci/proc.c
49927@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
49928 static int __init pci_proc_init(void)
49929 {
49930 struct pci_dev *dev = NULL;
49931+
49932+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49933+#ifdef CONFIG_GRKERNSEC_PROC_USER
49934+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
49935+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49936+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49937+#endif
49938+#else
49939 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
49940+#endif
49941 proc_create("devices", 0, proc_bus_pci_dir,
49942 &proc_bus_pci_dev_operations);
49943 proc_initialized = 1;
49944diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
49945index b84fdd6..b89d829 100644
49946--- a/drivers/platform/chrome/chromeos_laptop.c
49947+++ b/drivers/platform/chrome/chromeos_laptop.c
49948@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
49949 .callback = chromeos_laptop_dmi_matched, \
49950 .driver_data = (void *)&board_
49951
49952-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
49953+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
49954 {
49955 .ident = "Samsung Series 5 550",
49956 .matches = {
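
[annotation] __initdata becomes __initconst here and in several later DMI-table hunks; both sections are discarded after boot, but __initconst is the matching annotation once the table is treated as const, keeping it in a read-only section until then. A hedged sketch of the usual kernel idiom (entry contents illustrative):

    static const struct dmi_system_id table[] __initconst = {
            { .ident = "Example board" },   /* illustrative entry */
            { }                             /* terminator         */
    };
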
49957diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
49958index 1e1e594..8fe59c5 100644
49959--- a/drivers/platform/x86/alienware-wmi.c
49960+++ b/drivers/platform/x86/alienware-wmi.c
49961@@ -150,7 +150,7 @@ struct wmax_led_args {
49962 } __packed;
49963
49964 static struct platform_device *platform_device;
49965-static struct device_attribute *zone_dev_attrs;
49966+static device_attribute_no_const *zone_dev_attrs;
49967 static struct attribute **zone_attrs;
49968 static struct platform_zone *zone_data;
49969
49970@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
49971 }
49972 };
49973
49974-static struct attribute_group zone_attribute_group = {
49975+static attribute_group_no_const zone_attribute_group = {
49976 .name = "rgb_zones",
49977 };
49978
49979diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
49980index 7543a56..367ca8ed 100644
49981--- a/drivers/platform/x86/asus-wmi.c
49982+++ b/drivers/platform/x86/asus-wmi.c
49983@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
49984 int err;
49985 u32 retval = -1;
49986
49987+#ifdef CONFIG_GRKERNSEC_KMEM
49988+ return -EPERM;
49989+#endif
49990+
49991 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
49992
49993 if (err < 0)
49994@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
49995 int err;
49996 u32 retval = -1;
49997
49998+#ifdef CONFIG_GRKERNSEC_KMEM
49999+ return -EPERM;
50000+#endif
50001+
50002 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50003 &retval);
50004
50005@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
50006 union acpi_object *obj;
50007 acpi_status status;
50008
50009+#ifdef CONFIG_GRKERNSEC_KMEM
50010+ return -EPERM;
50011+#endif
50012+
50013 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50014 1, asus->debug.method_id,
50015 &input, &output);
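
[annotation] The three asus-wmi debugfs handlers above gain an unconditional -EPERM when GRKERNSEC_KMEM is enabled, since they let userland invoke arbitrary WMI methods against firmware. A hedged kernel-style sketch of the gating pattern:

    static int show_call(struct seq_file *m, void *data)
    {
    #ifdef CONFIG_GRKERNSEC_KMEM
            return -EPERM;          /* refuse before touching firmware */
    #endif
            /* ... normal debugfs/WMI path continues here ... */
            return 0;
    }
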
50016diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50017index 0859877..1cf7d08 100644
50018--- a/drivers/platform/x86/msi-laptop.c
50019+++ b/drivers/platform/x86/msi-laptop.c
50020@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50021
50022 if (!quirks->ec_read_only) {
50023 /* allow userland write sysfs file */
50024- dev_attr_bluetooth.store = store_bluetooth;
50025- dev_attr_wlan.store = store_wlan;
50026- dev_attr_threeg.store = store_threeg;
50027- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50028- dev_attr_wlan.attr.mode |= S_IWUSR;
50029- dev_attr_threeg.attr.mode |= S_IWUSR;
50030+ pax_open_kernel();
50031+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50032+ *(void **)&dev_attr_wlan.store = store_wlan;
50033+ *(void **)&dev_attr_threeg.store = store_threeg;
50034+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50035+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50036+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50037+ pax_close_kernel();
50038 }
50039
50040 /* disable hardware control by fn key */
50041diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50042index 6d2bac0..ec2b029 100644
50043--- a/drivers/platform/x86/msi-wmi.c
50044+++ b/drivers/platform/x86/msi-wmi.c
50045@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50046 static void msi_wmi_notify(u32 value, void *context)
50047 {
50048 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50049- static struct key_entry *key;
50050+ struct key_entry *key;
50051 union acpi_object *obj;
50052 acpi_status status;
50053
50054diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50055index 6dd1c0e..5d602c7 100644
50056--- a/drivers/platform/x86/sony-laptop.c
50057+++ b/drivers/platform/x86/sony-laptop.c
50058@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50059 }
50060
50061 /* High speed charging function */
50062-static struct device_attribute *hsc_handle;
50063+static device_attribute_no_const *hsc_handle;
50064
50065 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50066 struct device_attribute *attr,
50067@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50068 }
50069
50070 /* low battery function */
50071-static struct device_attribute *lowbatt_handle;
50072+static device_attribute_no_const *lowbatt_handle;
50073
50074 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50075 struct device_attribute *attr,
50076@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50077 }
50078
50079 /* fan speed function */
50080-static struct device_attribute *fan_handle, *hsf_handle;
50081+static device_attribute_no_const *fan_handle, *hsf_handle;
50082
50083 static ssize_t sony_nc_hsfan_store(struct device *dev,
50084 struct device_attribute *attr,
50085@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50086 }
50087
50088 /* USB charge function */
50089-static struct device_attribute *uc_handle;
50090+static device_attribute_no_const *uc_handle;
50091
50092 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50093 struct device_attribute *attr,
50094@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50095 }
50096
50097 /* Panel ID function */
50098-static struct device_attribute *panel_handle;
50099+static device_attribute_no_const *panel_handle;
50100
50101 static ssize_t sony_nc_panelid_show(struct device *dev,
50102 struct device_attribute *attr, char *buffer)
50103@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50104 }
50105
50106 /* smart connect function */
50107-static struct device_attribute *sc_handle;
50108+static device_attribute_no_const *sc_handle;
50109
50110 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50111 struct device_attribute *attr,
50112diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50113index c3d11fa..f83cded 100644
50114--- a/drivers/platform/x86/thinkpad_acpi.c
50115+++ b/drivers/platform/x86/thinkpad_acpi.c
50116@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50117 return 0;
50118 }
50119
50120-void static hotkey_mask_warn_incomplete_mask(void)
50121+static void hotkey_mask_warn_incomplete_mask(void)
50122 {
50123 /* log only what the user can fix... */
50124 const u32 wantedmask = hotkey_driver_mask &
50125@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50126 && !tp_features.bright_unkfw)
50127 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50128 }
50129+}
50130
50131 #undef TPACPI_COMPARE_KEY
50132 #undef TPACPI_MAY_SEND_KEY
50133-}
50134
50135 /*
50136 * Polling driver
50137diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50138index 438d4c7..ca8a2fb 100644
50139--- a/drivers/pnp/pnpbios/bioscalls.c
50140+++ b/drivers/pnp/pnpbios/bioscalls.c
50141@@ -59,7 +59,7 @@ do { \
50142 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50143 } while(0)
50144
50145-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50146+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50147 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50148
50149 /*
50150@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50151
50152 cpu = get_cpu();
50153 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50154+
50155+ pax_open_kernel();
50156 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50157+ pax_close_kernel();
50158
50159 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50160 spin_lock_irqsave(&pnp_bios_lock, flags);
50161@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50162 :"memory");
50163 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50164
50165+ pax_open_kernel();
50166 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50167+ pax_close_kernel();
50168+
50169 put_cpu();
50170
50171 /* If we get here and this is set then the PnP BIOS faulted on us. */
50172@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50173 return status;
50174 }
50175
50176-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50177+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50178 {
50179 int i;
50180
50181@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50182 pnp_bios_callpoint.offset = header->fields.pm16offset;
50183 pnp_bios_callpoint.segment = PNP_CS16;
50184
50185+ pax_open_kernel();
50186+
50187 for_each_possible_cpu(i) {
50188 struct desc_struct *gdt = get_cpu_gdt_table(i);
50189 if (!gdt)
50190@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50191 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50192 (unsigned long)__va(header->fields.pm16dseg));
50193 }
50194+
50195+ pax_close_kernel();
50196 }
50197diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50198index 0c52e2a..3421ab7 100644
50199--- a/drivers/power/pda_power.c
50200+++ b/drivers/power/pda_power.c
50201@@ -37,7 +37,11 @@ static int polling;
50202
50203 #if IS_ENABLED(CONFIG_USB_PHY)
50204 static struct usb_phy *transceiver;
50205-static struct notifier_block otg_nb;
50206+static int otg_handle_notification(struct notifier_block *nb,
50207+ unsigned long event, void *unused);
50208+static struct notifier_block otg_nb = {
50209+ .notifier_call = otg_handle_notification
50210+};
50211 #endif
50212
50213 static struct regulator *ac_draw;
50214@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50215
50216 #if IS_ENABLED(CONFIG_USB_PHY)
50217 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50218- otg_nb.notifier_call = otg_handle_notification;
50219 ret = usb_register_notifier(transceiver, &otg_nb);
50220 if (ret) {
50221 dev_err(dev, "failure to register otg notifier\n");
50222diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50223index cc439fd..8fa30df 100644
50224--- a/drivers/power/power_supply.h
50225+++ b/drivers/power/power_supply.h
50226@@ -16,12 +16,12 @@ struct power_supply;
50227
50228 #ifdef CONFIG_SYSFS
50229
50230-extern void power_supply_init_attrs(struct device_type *dev_type);
50231+extern void power_supply_init_attrs(void);
50232 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50233
50234 #else
50235
50236-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50237+static inline void power_supply_init_attrs(void) {}
50238 #define power_supply_uevent NULL
50239
50240 #endif /* CONFIG_SYSFS */
50241diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50242index 694e8cd..9f03483 100644
50243--- a/drivers/power/power_supply_core.c
50244+++ b/drivers/power/power_supply_core.c
50245@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50246 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50247 EXPORT_SYMBOL_GPL(power_supply_notifier);
50248
50249-static struct device_type power_supply_dev_type;
50250+extern const struct attribute_group *power_supply_attr_groups[];
50251+static struct device_type power_supply_dev_type = {
50252+ .groups = power_supply_attr_groups,
50253+};
50254
50255 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50256 struct power_supply *supply)
50257@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50258 return PTR_ERR(power_supply_class);
50259
50260 power_supply_class->dev_uevent = power_supply_uevent;
50261- power_supply_init_attrs(&power_supply_dev_type);
50262+ power_supply_init_attrs();
50263
50264 return 0;
50265 }
50266diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50267index 62653f5..d0bb485 100644
50268--- a/drivers/power/power_supply_sysfs.c
50269+++ b/drivers/power/power_supply_sysfs.c
50270@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50271 .is_visible = power_supply_attr_is_visible,
50272 };
50273
50274-static const struct attribute_group *power_supply_attr_groups[] = {
50275+const struct attribute_group *power_supply_attr_groups[] = {
50276 &power_supply_attr_group,
50277 NULL,
50278 };
50279
50280-void power_supply_init_attrs(struct device_type *dev_type)
50281+void power_supply_init_attrs(void)
50282 {
50283 int i;
50284
50285- dev_type->groups = power_supply_attr_groups;
50286-
50287 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50288 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50289 }
50290diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50291index 84419af..268ede8 100644
50292--- a/drivers/powercap/powercap_sys.c
50293+++ b/drivers/powercap/powercap_sys.c
50294@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50295 struct device_attribute name_attr;
50296 };
50297
50298+static ssize_t show_constraint_name(struct device *dev,
50299+ struct device_attribute *dev_attr,
50300+ char *buf);
50301+
50302 static struct powercap_constraint_attr
50303- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50304+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50305+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50306+ .power_limit_attr = {
50307+ .attr = {
50308+ .name = NULL,
50309+ .mode = S_IWUSR | S_IRUGO
50310+ },
50311+ .show = show_constraint_power_limit_uw,
50312+ .store = store_constraint_power_limit_uw
50313+ },
50314+
50315+ .time_window_attr = {
50316+ .attr = {
50317+ .name = NULL,
50318+ .mode = S_IWUSR | S_IRUGO
50319+ },
50320+ .show = show_constraint_time_window_us,
50321+ .store = store_constraint_time_window_us
50322+ },
50323+
50324+ .max_power_attr = {
50325+ .attr = {
50326+ .name = NULL,
50327+ .mode = S_IRUGO
50328+ },
50329+ .show = show_constraint_max_power_uw,
50330+ .store = NULL
50331+ },
50332+
50333+ .min_power_attr = {
50334+ .attr = {
50335+ .name = NULL,
50336+ .mode = S_IRUGO
50337+ },
50338+ .show = show_constraint_min_power_uw,
50339+ .store = NULL
50340+ },
50341+
50342+ .max_time_window_attr = {
50343+ .attr = {
50344+ .name = NULL,
50345+ .mode = S_IRUGO
50346+ },
50347+ .show = show_constraint_max_time_window_us,
50348+ .store = NULL
50349+ },
50350+
50351+ .min_time_window_attr = {
50352+ .attr = {
50353+ .name = NULL,
50354+ .mode = S_IRUGO
50355+ },
50356+ .show = show_constraint_min_time_window_us,
50357+ .store = NULL
50358+ },
50359+
50360+ .name_attr = {
50361+ .attr = {
50362+ .name = NULL,
50363+ .mode = S_IRUGO
50364+ },
50365+ .show = show_constraint_name,
50366+ .store = NULL
50367+ }
50368+ }
50369+};
50370
50371 /* A list of powercap control_types */
50372 static LIST_HEAD(powercap_cntrl_list);
50373@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50374 }
50375
50376 static int create_constraint_attribute(int id, const char *name,
50377- int mode,
50378- struct device_attribute *dev_attr,
50379- ssize_t (*show)(struct device *,
50380- struct device_attribute *, char *),
50381- ssize_t (*store)(struct device *,
50382- struct device_attribute *,
50383- const char *, size_t)
50384- )
50385+ struct device_attribute *dev_attr)
50386 {
50387+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50388
50389- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50390- id, name);
50391- if (!dev_attr->attr.name)
50392+ if (!name)
50393 return -ENOMEM;
50394- dev_attr->attr.mode = mode;
50395- dev_attr->show = show;
50396- dev_attr->store = store;
50397+
50398+ pax_open_kernel();
50399+ *(const char **)&dev_attr->attr.name = name;
50400+ pax_close_kernel();
50401
50402 return 0;
50403 }
50404@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50405
50406 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50407 ret = create_constraint_attribute(i, "power_limit_uw",
50408- S_IWUSR | S_IRUGO,
50409- &constraint_attrs[i].power_limit_attr,
50410- show_constraint_power_limit_uw,
50411- store_constraint_power_limit_uw);
50412+ &constraint_attrs[i].power_limit_attr);
50413 if (ret)
50414 goto err_alloc;
50415 ret = create_constraint_attribute(i, "time_window_us",
50416- S_IWUSR | S_IRUGO,
50417- &constraint_attrs[i].time_window_attr,
50418- show_constraint_time_window_us,
50419- store_constraint_time_window_us);
50420+ &constraint_attrs[i].time_window_attr);
50421 if (ret)
50422 goto err_alloc;
50423- ret = create_constraint_attribute(i, "name", S_IRUGO,
50424- &constraint_attrs[i].name_attr,
50425- show_constraint_name,
50426- NULL);
50427+ ret = create_constraint_attribute(i, "name",
50428+ &constraint_attrs[i].name_attr);
50429 if (ret)
50430 goto err_alloc;
50431- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50432- &constraint_attrs[i].max_power_attr,
50433- show_constraint_max_power_uw,
50434- NULL);
50435+ ret = create_constraint_attribute(i, "max_power_uw",
50436+ &constraint_attrs[i].max_power_attr);
50437 if (ret)
50438 goto err_alloc;
50439- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50440- &constraint_attrs[i].min_power_attr,
50441- show_constraint_min_power_uw,
50442- NULL);
50443+ ret = create_constraint_attribute(i, "min_power_uw",
50444+ &constraint_attrs[i].min_power_attr);
50445 if (ret)
50446 goto err_alloc;
50447 ret = create_constraint_attribute(i, "max_time_window_us",
50448- S_IRUGO,
50449- &constraint_attrs[i].max_time_window_attr,
50450- show_constraint_max_time_window_us,
50451- NULL);
50452+ &constraint_attrs[i].max_time_window_attr);
50453 if (ret)
50454 goto err_alloc;
50455 ret = create_constraint_attribute(i, "min_time_window_us",
50456- S_IRUGO,
50457- &constraint_attrs[i].min_time_window_attr,
50458- show_constraint_min_time_window_us,
50459- NULL);
50460+ &constraint_attrs[i].min_time_window_attr);
50461 if (ret)
50462 goto err_alloc;
50463
50464@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50465 power_zone->zone_dev_attrs[count++] =
50466 &dev_attr_max_energy_range_uj.attr;
50467 if (power_zone->ops->get_energy_uj) {
50468+ pax_open_kernel();
50469 if (power_zone->ops->reset_energy_uj)
50470- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50471+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50472 else
50473- dev_attr_energy_uj.attr.mode = S_IRUGO;
50474+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50475+ pax_close_kernel();
50476 power_zone->zone_dev_attrs[count++] =
50477 &dev_attr_energy_uj.attr;
50478 }
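
[annotation] The powercap hunk seeds all MAX_CONSTRAINTS_PER_ZONE attribute slots statically with a GNU range designator ([0 ... N-1]), leaving only the kasprintf()'d name to be patched in at runtime under pax_open_kernel(). The designator itself, as a small runnable example:

    #include <stdio.h>

    struct attr { int mode; };

    /* GNU extension: every element shares one static initializer. */
    static const struct attr attrs[8] = {
            [0 ... 7] = { .mode = 0444 },
    };

    int main(void) { printf("%o\n", attrs[5].mode); return 0; }
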
50479diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50480index 9c5d414..c7900ce 100644
50481--- a/drivers/ptp/ptp_private.h
50482+++ b/drivers/ptp/ptp_private.h
50483@@ -51,7 +51,7 @@ struct ptp_clock {
50484 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50485 wait_queue_head_t tsev_wq;
50486 int defunct; /* tells readers to go away when clock is being removed */
50487- struct device_attribute *pin_dev_attr;
50488+ device_attribute_no_const *pin_dev_attr;
50489 struct attribute **pin_attr;
50490 struct attribute_group pin_attr_group;
50491 };
50492diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50493index 302e626..12579af 100644
50494--- a/drivers/ptp/ptp_sysfs.c
50495+++ b/drivers/ptp/ptp_sysfs.c
50496@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50497 goto no_pin_attr;
50498
50499 for (i = 0; i < n_pins; i++) {
50500- struct device_attribute *da = &ptp->pin_dev_attr[i];
50501+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50502 sysfs_attr_init(&da->attr);
50503 da->attr.name = info->pin_config[i].name;
50504 da->attr.mode = 0644;
50505diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50506index a5761d0..a2a4540 100644
50507--- a/drivers/regulator/core.c
50508+++ b/drivers/regulator/core.c
50509@@ -3591,7 +3591,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50510 {
50511 const struct regulation_constraints *constraints = NULL;
50512 const struct regulator_init_data *init_data;
50513- static atomic_t regulator_no = ATOMIC_INIT(0);
50514+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50515 struct regulator_dev *rdev;
50516 struct device *dev;
50517 int ret, i;
50518@@ -3665,7 +3665,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50519 rdev->dev.class = &regulator_class;
50520 rdev->dev.parent = dev;
50521 dev_set_name(&rdev->dev, "regulator.%d",
50522- atomic_inc_return(&regulator_no) - 1);
50523+ atomic_inc_return_unchecked(&regulator_no) - 1);
50524 ret = device_register(&rdev->dev);
50525 if (ret != 0) {
50526 put_device(&rdev->dev);
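
[annotation] atomic_t becomes atomic_unchecked_t for counters whose wraparound is harmless (instance numbering here, statistics in the later libfc hunks); under PAX_REFCOUNT the checked operations trap on overflow, and the _unchecked variants opt such counters out while real reference counts stay protected. Hedged sketch, meaningful only in a PaX-patched tree:

    static atomic_t           refs    = ATOMIC_INIT(0); /* traps on overflow */
    static atomic_unchecked_t next_id = ATOMIC_INIT(0); /* may wrap silently */

    static int new_id(void)
    {
            return atomic_inc_return_unchecked(&next_id) - 1;
    }
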
50527diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50528index 7eee2ca..4024513 100644
50529--- a/drivers/regulator/max8660.c
50530+++ b/drivers/regulator/max8660.c
50531@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50532 max8660->shadow_regs[MAX8660_OVER1] = 5;
50533 } else {
50534 /* Otherwise devices can be toggled via software */
50535- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50536- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50537+ pax_open_kernel();
50538+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50539+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50540+ pax_close_kernel();
50541 }
50542
50543 /*
50544diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50545index c3d55c2..0dddfe6 100644
50546--- a/drivers/regulator/max8973-regulator.c
50547+++ b/drivers/regulator/max8973-regulator.c
50548@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50549 if (!pdata || !pdata->enable_ext_control) {
50550 max->desc.enable_reg = MAX8973_VOUT;
50551 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50552- max->ops.enable = regulator_enable_regmap;
50553- max->ops.disable = regulator_disable_regmap;
50554- max->ops.is_enabled = regulator_is_enabled_regmap;
50555+ pax_open_kernel();
50556+ *(void **)&max->ops.enable = regulator_enable_regmap;
50557+ *(void **)&max->ops.disable = regulator_disable_regmap;
50558+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50559+ pax_close_kernel();
50560 }
50561
50562 if (pdata) {
50563diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50564index 0d17c92..a29f627 100644
50565--- a/drivers/regulator/mc13892-regulator.c
50566+++ b/drivers/regulator/mc13892-regulator.c
50567@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50568 mc13xxx_unlock(mc13892);
50569
50570 /* update mc13892_vcam ops */
50571- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50572+ pax_open_kernel();
50573+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50574 sizeof(struct regulator_ops));
50575- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50576- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50577+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50578+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50579+ pax_close_kernel();
50580 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50581
50582 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50583diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50584index 5b2e761..c8c8a4a 100644
50585--- a/drivers/rtc/rtc-cmos.c
50586+++ b/drivers/rtc/rtc-cmos.c
50587@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50588 hpet_rtc_timer_init();
50589
50590 /* export at least the first block of NVRAM */
50591- nvram.size = address_space - NVRAM_OFFSET;
50592+ pax_open_kernel();
50593+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50594+ pax_close_kernel();
50595 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50596 if (retval < 0) {
50597 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50598diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50599index d049393..bb20be0 100644
50600--- a/drivers/rtc/rtc-dev.c
50601+++ b/drivers/rtc/rtc-dev.c
50602@@ -16,6 +16,7 @@
50603 #include <linux/module.h>
50604 #include <linux/rtc.h>
50605 #include <linux/sched.h>
50606+#include <linux/grsecurity.h>
50607 #include "rtc-core.h"
50608
50609 static dev_t rtc_devt;
50610@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50611 if (copy_from_user(&tm, uarg, sizeof(tm)))
50612 return -EFAULT;
50613
50614+ gr_log_timechange();
50615+
50616 return rtc_set_time(rtc, &tm);
50617
50618 case RTC_PIE_ON:
50619diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50620index 4ffabb3..1f87fca 100644
50621--- a/drivers/rtc/rtc-ds1307.c
50622+++ b/drivers/rtc/rtc-ds1307.c
50623@@ -107,7 +107,7 @@ struct ds1307 {
50624 u8 offset; /* register's offset */
50625 u8 regs[11];
50626 u16 nvram_offset;
50627- struct bin_attribute *nvram;
50628+ bin_attribute_no_const *nvram;
50629 enum ds_type type;
50630 unsigned long flags;
50631 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50632diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50633index 90abb5b..e0bf6dd 100644
50634--- a/drivers/rtc/rtc-m48t59.c
50635+++ b/drivers/rtc/rtc-m48t59.c
50636@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50637 if (IS_ERR(m48t59->rtc))
50638 return PTR_ERR(m48t59->rtc);
50639
50640- m48t59_nvram_attr.size = pdata->offset;
50641+ pax_open_kernel();
50642+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50643+ pax_close_kernel();
50644
50645 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50646 if (ret)
50647diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50648index e693af6..2e525b6 100644
50649--- a/drivers/scsi/bfa/bfa_fcpim.h
50650+++ b/drivers/scsi/bfa/bfa_fcpim.h
50651@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50652
50653 struct bfa_itn_s {
50654 bfa_isr_func_t isr;
50655-};
50656+} __no_const;
50657
50658 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50659 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
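
[annotation] __no_const (a grsecurity attribute, defined away on vanilla kernels) is the inverse move: it exempts a structure from constification because its function pointer genuinely must be assigned at runtime, as bfa_itn_s's isr is installed per rport by bfa_itn_create(). Sketch of the annotation:

    struct bfa_itn_s {
            bfa_isr_func_t isr;
    } __no_const;           /* stays writable: isr is late-bound */

    itn->isr = isr;         /* the legitimate runtime assignment */
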
50660diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50661index 0f19455..ef7adb5 100644
50662--- a/drivers/scsi/bfa/bfa_fcs.c
50663+++ b/drivers/scsi/bfa/bfa_fcs.c
50664@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50665 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50666
50667 static struct bfa_fcs_mod_s fcs_modules[] = {
50668- { bfa_fcs_port_attach, NULL, NULL },
50669- { bfa_fcs_uf_attach, NULL, NULL },
50670- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50671- bfa_fcs_fabric_modexit },
50672+ {
50673+ .attach = bfa_fcs_port_attach,
50674+ .modinit = NULL,
50675+ .modexit = NULL
50676+ },
50677+ {
50678+ .attach = bfa_fcs_uf_attach,
50679+ .modinit = NULL,
50680+ .modexit = NULL
50681+ },
50682+ {
50683+ .attach = bfa_fcs_fabric_attach,
50684+ .modinit = bfa_fcs_fabric_modinit,
50685+ .modexit = bfa_fcs_fabric_modexit
50686+ },
50687 };
50688
50689 /*
50690diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50691index ff75ef8..2dfe00a 100644
50692--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50693+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50694@@ -89,15 +89,26 @@ static struct {
50695 void (*offline) (struct bfa_fcs_lport_s *port);
50696 } __port_action[] = {
50697 {
50698- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50699- bfa_fcs_lport_unknown_offline}, {
50700- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50701- bfa_fcs_lport_fab_offline}, {
50702- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50703- bfa_fcs_lport_n2n_offline}, {
50704- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50705- bfa_fcs_lport_loop_offline},
50706- };
50707+ .init = bfa_fcs_lport_unknown_init,
50708+ .online = bfa_fcs_lport_unknown_online,
50709+ .offline = bfa_fcs_lport_unknown_offline
50710+ },
50711+ {
50712+ .init = bfa_fcs_lport_fab_init,
50713+ .online = bfa_fcs_lport_fab_online,
50714+ .offline = bfa_fcs_lport_fab_offline
50715+ },
50716+ {
50717+ .init = bfa_fcs_lport_n2n_init,
50718+ .online = bfa_fcs_lport_n2n_online,
50719+ .offline = bfa_fcs_lport_n2n_offline
50720+ },
50721+ {
50722+ .init = bfa_fcs_lport_loop_init,
50723+ .online = bfa_fcs_lport_loop_online,
50724+ .offline = bfa_fcs_lport_loop_offline
50725+ },
50726+};
50727
50728 /*
50729 * fcs_port_sm FCS logical port state machine
50730diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50731index a38aafa0..fe8f03b 100644
50732--- a/drivers/scsi/bfa/bfa_ioc.h
50733+++ b/drivers/scsi/bfa/bfa_ioc.h
50734@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50735 bfa_ioc_disable_cbfn_t disable_cbfn;
50736 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50737 bfa_ioc_reset_cbfn_t reset_cbfn;
50738-};
50739+} __no_const;
50740
50741 /*
50742 * IOC event notification mechanism.
50743@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50744 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50745 enum bfi_ioc_state fwstate);
50746 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50747-};
50748+} __no_const;
50749
50750 /*
50751 * Queue element to wait for room in request queue. FIFO order is
50752diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50753index a14c784..6de6790 100644
50754--- a/drivers/scsi/bfa/bfa_modules.h
50755+++ b/drivers/scsi/bfa/bfa_modules.h
50756@@ -78,12 +78,12 @@ enum {
50757 \
50758 extern struct bfa_module_s hal_mod_ ## __mod; \
50759 struct bfa_module_s hal_mod_ ## __mod = { \
50760- bfa_ ## __mod ## _meminfo, \
50761- bfa_ ## __mod ## _attach, \
50762- bfa_ ## __mod ## _detach, \
50763- bfa_ ## __mod ## _start, \
50764- bfa_ ## __mod ## _stop, \
50765- bfa_ ## __mod ## _iocdisable, \
50766+ .meminfo = bfa_ ## __mod ## _meminfo, \
50767+ .attach = bfa_ ## __mod ## _attach, \
50768+ .detach = bfa_ ## __mod ## _detach, \
50769+ .start = bfa_ ## __mod ## _start, \
50770+ .stop = bfa_ ## __mod ## _stop, \
50771+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50772 }
50773
50774 #define BFA_CACHELINE_SZ (256)
50775diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50776index 045c4e1..13de803 100644
50777--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50778+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50779@@ -33,8 +33,8 @@
50780 */
50781 #include "libfcoe.h"
50782
50783-static atomic_t ctlr_num;
50784-static atomic_t fcf_num;
50785+static atomic_unchecked_t ctlr_num;
50786+static atomic_unchecked_t fcf_num;
50787
50788 /*
50789 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50790@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50791 if (!ctlr)
50792 goto out;
50793
50794- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50795+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50796 ctlr->f = f;
50797 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50798 INIT_LIST_HEAD(&ctlr->fcfs);
50799@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50800 fcf->dev.parent = &ctlr->dev;
50801 fcf->dev.bus = &fcoe_bus_type;
50802 fcf->dev.type = &fcoe_fcf_device_type;
50803- fcf->id = atomic_inc_return(&fcf_num) - 1;
50804+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50805 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50806
50807 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50808@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50809 {
50810 int error;
50811
50812- atomic_set(&ctlr_num, 0);
50813- atomic_set(&fcf_num, 0);
50814+ atomic_set_unchecked(&ctlr_num, 0);
50815+ atomic_set_unchecked(&fcf_num, 0);
50816
50817 error = bus_register(&fcoe_bus_type);
50818 if (error)
50819diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50820index 8bb173e..20236b4 100644
50821--- a/drivers/scsi/hosts.c
50822+++ b/drivers/scsi/hosts.c
50823@@ -42,7 +42,7 @@
50824 #include "scsi_logging.h"
50825
50826
50827-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50828+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50829
50830
50831 static void scsi_host_cls_release(struct device *dev)
50832@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50833 * subtract one because we increment first then return, but we need to
50834 * know what the next host number was before increment
50835 */
50836- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50837+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50838 shost->dma_channel = 0xff;
50839
50840 /* These three are default values which can be overridden */
50841diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50842index 6bb4611..0203251 100644
50843--- a/drivers/scsi/hpsa.c
50844+++ b/drivers/scsi/hpsa.c
50845@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50846 struct reply_queue_buffer *rq = &h->reply_queue[q];
50847
50848 if (h->transMethod & CFGTBL_Trans_io_accel1)
50849- return h->access.command_completed(h, q);
50850+ return h->access->command_completed(h, q);
50851
50852 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50853- return h->access.command_completed(h, q);
50854+ return h->access->command_completed(h, q);
50855
50856 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50857 a = rq->head[rq->current_entry];
50858@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50859 while (!list_empty(&h->reqQ)) {
50860 c = list_entry(h->reqQ.next, struct CommandList, list);
50861 /* can't do anything if fifo is full */
50862- if ((h->access.fifo_full(h))) {
50863+ if ((h->access->fifo_full(h))) {
50864 h->fifo_recently_full = 1;
50865 dev_warn(&h->pdev->dev, "fifo full\n");
50866 break;
50867@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50868 atomic_inc(&h->commands_outstanding);
50869 spin_unlock_irqrestore(&h->lock, *flags);
50870 /* Tell the controller execute command */
50871- h->access.submit_command(h, c);
50872+ h->access->submit_command(h, c);
50873 spin_lock_irqsave(&h->lock, *flags);
50874 }
50875 }
50876@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50877
50878 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50879 {
50880- return h->access.command_completed(h, q);
50881+ return h->access->command_completed(h, q);
50882 }
50883
50884 static inline bool interrupt_pending(struct ctlr_info *h)
50885 {
50886- return h->access.intr_pending(h);
50887+ return h->access->intr_pending(h);
50888 }
50889
50890 static inline long interrupt_not_for_us(struct ctlr_info *h)
50891 {
50892- return (h->access.intr_pending(h) == 0) ||
50893+ return (h->access->intr_pending(h) == 0) ||
50894 (h->interrupts_enabled == 0);
50895 }
50896
50897@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50898 if (prod_index < 0)
50899 return -ENODEV;
50900 h->product_name = products[prod_index].product_name;
50901- h->access = *(products[prod_index].access);
50902+ h->access = products[prod_index].access;
50903
50904 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50905 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50906@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50907 unsigned long flags;
50908 u32 lockup_detected;
50909
50910- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50911+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50912 spin_lock_irqsave(&h->lock, flags);
50913 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50914 if (!lockup_detected) {
50915@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50916 }
50917
50918 /* make sure the board interrupts are off */
50919- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50920+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50921
50922 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
50923 goto clean2;
50924@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
50925 * fake ones to scoop up any residual completions.
50926 */
50927 spin_lock_irqsave(&h->lock, flags);
50928- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50929+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50930 spin_unlock_irqrestore(&h->lock, flags);
50931 free_irqs(h);
50932 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
50933@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
50934 dev_info(&h->pdev->dev, "Board READY.\n");
50935 dev_info(&h->pdev->dev,
50936 "Waiting for stale completions to drain.\n");
50937- h->access.set_intr_mask(h, HPSA_INTR_ON);
50938+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50939 msleep(10000);
50940- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50941+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50942
50943 rc = controller_reset_failed(h->cfgtable);
50944 if (rc)
50945@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
50946 h->drv_req_rescan = 0;
50947
50948 /* Turn the interrupts on so we can service requests */
50949- h->access.set_intr_mask(h, HPSA_INTR_ON);
50950+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50951
50952 hpsa_hba_inquiry(h);
50953 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
50954@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
50955 * To write all data in the battery backed cache to disks
50956 */
50957 hpsa_flush_cache(h);
50958- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50959+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50960 hpsa_free_irqs_and_disable_msix(h);
50961 }
50962
50963@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50964 CFGTBL_Trans_enable_directed_msix |
50965 (trans_support & (CFGTBL_Trans_io_accel1 |
50966 CFGTBL_Trans_io_accel2));
50967- struct access_method access = SA5_performant_access;
50968+ struct access_method *access = &SA5_performant_access;
50969
50970 /* This is a bit complicated. There are 8 registers on
50971 * the controller which we write to to tell it 8 different
50972@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50973 * perform the superfluous readl() after each command submission.
50974 */
50975 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
50976- access = SA5_performant_access_no_read;
50977+ access = &SA5_performant_access_no_read;
50978
50979 /* Controller spec: zero out this buffer. */
50980 for (i = 0; i < h->nreply_queues; i++)
50981@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50982 * enable outbound interrupt coalescing in accelerator mode;
50983 */
50984 if (trans_support & CFGTBL_Trans_io_accel1) {
50985- access = SA5_ioaccel_mode1_access;
50986+ access = &SA5_ioaccel_mode1_access;
50987 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50988 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50989 } else {
50990 if (trans_support & CFGTBL_Trans_io_accel2) {
50991- access = SA5_ioaccel_mode2_access;
50992+ access = &SA5_ioaccel_mode2_access;
50993 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50994 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50995 }
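
[annotation] The hpsa hunks stop copying the access_method table by value into each controller (h->access = *table) and store a pointer instead, turning every h->access.fn(...) call site into h->access->fn(...); the five shared ops tables can then be constified rather than existing as writable per-device copies. Minimal shape of the change as a runnable example (names illustrative):

    #include <stdio.h>

    struct access_method { int (*intr_pending)(void); };

    static int sa5_intr_pending(void) { return 0; }
    static const struct access_method sa5_access = {
            .intr_pending = sa5_intr_pending,
    };

    struct ctlr_info { const struct access_method *access; };

    int main(void)
    {
            struct ctlr_info h = { .access = &sa5_access };
            printf("%d\n", h.access->intr_pending());
            return 0;
    }
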
50996diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
50997index 8e06d9e..396e0a1 100644
50998--- a/drivers/scsi/hpsa.h
50999+++ b/drivers/scsi/hpsa.h
51000@@ -127,7 +127,7 @@ struct ctlr_info {
51001 unsigned int msix_vector;
51002 unsigned int msi_vector;
51003 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51004- struct access_method access;
51005+ struct access_method *access;
51006 char hba_mode_enabled;
51007
51008 /* queue and queue Info */
51009@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51010 }
51011
51012 static struct access_method SA5_access = {
51013- SA5_submit_command,
51014- SA5_intr_mask,
51015- SA5_fifo_full,
51016- SA5_intr_pending,
51017- SA5_completed,
51018+ .submit_command = SA5_submit_command,
51019+ .set_intr_mask = SA5_intr_mask,
51020+ .fifo_full = SA5_fifo_full,
51021+ .intr_pending = SA5_intr_pending,
51022+ .command_completed = SA5_completed,
51023 };
51024
51025 static struct access_method SA5_ioaccel_mode1_access = {
51026- SA5_submit_command,
51027- SA5_performant_intr_mask,
51028- SA5_fifo_full,
51029- SA5_ioaccel_mode1_intr_pending,
51030- SA5_ioaccel_mode1_completed,
51031+ .submit_command = SA5_submit_command,
51032+ .set_intr_mask = SA5_performant_intr_mask,
51033+ .fifo_full = SA5_fifo_full,
51034+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51035+ .command_completed = SA5_ioaccel_mode1_completed,
51036 };
51037
51038 static struct access_method SA5_ioaccel_mode2_access = {
51039- SA5_submit_command_ioaccel2,
51040- SA5_performant_intr_mask,
51041- SA5_fifo_full,
51042- SA5_performant_intr_pending,
51043- SA5_performant_completed,
51044+ .submit_command = SA5_submit_command_ioaccel2,
51045+ .set_intr_mask = SA5_performant_intr_mask,
51046+ .fifo_full = SA5_fifo_full,
51047+ .intr_pending = SA5_performant_intr_pending,
51048+ .command_completed = SA5_performant_completed,
51049 };
51050
51051 static struct access_method SA5_performant_access = {
51052- SA5_submit_command,
51053- SA5_performant_intr_mask,
51054- SA5_fifo_full,
51055- SA5_performant_intr_pending,
51056- SA5_performant_completed,
51057+ .submit_command = SA5_submit_command,
51058+ .set_intr_mask = SA5_performant_intr_mask,
51059+ .fifo_full = SA5_fifo_full,
51060+ .intr_pending = SA5_performant_intr_pending,
51061+ .command_completed = SA5_performant_completed,
51062 };
51063
51064 static struct access_method SA5_performant_access_no_read = {
51065- SA5_submit_command_no_read,
51066- SA5_performant_intr_mask,
51067- SA5_fifo_full,
51068- SA5_performant_intr_pending,
51069- SA5_performant_completed,
51070+ .submit_command = SA5_submit_command_no_read,
51071+ .set_intr_mask = SA5_performant_intr_mask,
51072+ .fifo_full = SA5_fifo_full,
51073+ .intr_pending = SA5_performant_intr_pending,
51074+ .command_completed = SA5_performant_completed,
51075 };
51076
51077 struct board_type {
51078diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51079index 1b3a094..068e683 100644
51080--- a/drivers/scsi/libfc/fc_exch.c
51081+++ b/drivers/scsi/libfc/fc_exch.c
51082@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51083 u16 pool_max_index;
51084
51085 struct {
51086- atomic_t no_free_exch;
51087- atomic_t no_free_exch_xid;
51088- atomic_t xid_not_found;
51089- atomic_t xid_busy;
51090- atomic_t seq_not_found;
51091- atomic_t non_bls_resp;
51092+ atomic_unchecked_t no_free_exch;
51093+ atomic_unchecked_t no_free_exch_xid;
51094+ atomic_unchecked_t xid_not_found;
51095+ atomic_unchecked_t xid_busy;
51096+ atomic_unchecked_t seq_not_found;
51097+ atomic_unchecked_t non_bls_resp;
51098 } stats;
51099 };
51100
51101@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51102 /* allocate memory for exchange */
51103 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51104 if (!ep) {
51105- atomic_inc(&mp->stats.no_free_exch);
51106+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51107 goto out;
51108 }
51109 memset(ep, 0, sizeof(*ep));
51110@@ -874,7 +874,7 @@ out:
51111 return ep;
51112 err:
51113 spin_unlock_bh(&pool->lock);
51114- atomic_inc(&mp->stats.no_free_exch_xid);
51115+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51116 mempool_free(ep, mp->ep_pool);
51117 return NULL;
51118 }
51119@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51120 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51121 ep = fc_exch_find(mp, xid);
51122 if (!ep) {
51123- atomic_inc(&mp->stats.xid_not_found);
51124+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51125 reject = FC_RJT_OX_ID;
51126 goto out;
51127 }
51128@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51129 ep = fc_exch_find(mp, xid);
51130 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51131 if (ep) {
51132- atomic_inc(&mp->stats.xid_busy);
51133+ atomic_inc_unchecked(&mp->stats.xid_busy);
51134 reject = FC_RJT_RX_ID;
51135 goto rel;
51136 }
51137@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51138 }
51139 xid = ep->xid; /* get our XID */
51140 } else if (!ep) {
51141- atomic_inc(&mp->stats.xid_not_found);
51142+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51143 reject = FC_RJT_RX_ID; /* XID not found */
51144 goto out;
51145 }
51146@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51147 } else {
51148 sp = &ep->seq;
51149 if (sp->id != fh->fh_seq_id) {
51150- atomic_inc(&mp->stats.seq_not_found);
51151+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51152 if (f_ctl & FC_FC_END_SEQ) {
51153 /*
51154 * Update sequence_id based on incoming last
51155@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51156
51157 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51158 if (!ep) {
51159- atomic_inc(&mp->stats.xid_not_found);
51160+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51161 goto out;
51162 }
51163 if (ep->esb_stat & ESB_ST_COMPLETE) {
51164- atomic_inc(&mp->stats.xid_not_found);
51165+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51166 goto rel;
51167 }
51168 if (ep->rxid == FC_XID_UNKNOWN)
51169 ep->rxid = ntohs(fh->fh_rx_id);
51170 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51171- atomic_inc(&mp->stats.xid_not_found);
51172+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51173 goto rel;
51174 }
51175 if (ep->did != ntoh24(fh->fh_s_id) &&
51176 ep->did != FC_FID_FLOGI) {
51177- atomic_inc(&mp->stats.xid_not_found);
51178+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51179 goto rel;
51180 }
51181 sof = fr_sof(fp);
51182@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51183 sp->ssb_stat |= SSB_ST_RESP;
51184 sp->id = fh->fh_seq_id;
51185 } else if (sp->id != fh->fh_seq_id) {
51186- atomic_inc(&mp->stats.seq_not_found);
51187+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51188 goto rel;
51189 }
51190
51191@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51192 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51193
51194 if (!sp)
51195- atomic_inc(&mp->stats.xid_not_found);
51196+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51197 else
51198- atomic_inc(&mp->stats.non_bls_resp);
51199+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51200
51201 fc_frame_free(fp);
51202 }
51203@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51204
51205 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51206 mp = ema->mp;
51207- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51208+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51209 st->fc_no_free_exch_xid +=
51210- atomic_read(&mp->stats.no_free_exch_xid);
51211- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51212- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51213- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51214- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51215+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51216+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51217+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51218+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51219+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51220 }
51221 }
51222 EXPORT_SYMBOL(fc_exch_update_stats);
51223diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51224index 932d9cc..50c7ee9 100644
51225--- a/drivers/scsi/libsas/sas_ata.c
51226+++ b/drivers/scsi/libsas/sas_ata.c
51227@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51228 .postreset = ata_std_postreset,
51229 .error_handler = ata_std_error_handler,
51230 .post_internal_cmd = sas_ata_post_internal,
51231- .qc_defer = ata_std_qc_defer,
51232+ .qc_defer = ata_std_qc_defer,
51233 .qc_prep = ata_noop_qc_prep,
51234 .qc_issue = sas_ata_qc_issue,
51235 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51236diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51237index 434e903..5a4a79b 100644
51238--- a/drivers/scsi/lpfc/lpfc.h
51239+++ b/drivers/scsi/lpfc/lpfc.h
51240@@ -430,7 +430,7 @@ struct lpfc_vport {
51241 struct dentry *debug_nodelist;
51242 struct dentry *vport_debugfs_root;
51243 struct lpfc_debugfs_trc *disc_trc;
51244- atomic_t disc_trc_cnt;
51245+ atomic_unchecked_t disc_trc_cnt;
51246 #endif
51247 uint8_t stat_data_enabled;
51248 uint8_t stat_data_blocked;
51249@@ -880,8 +880,8 @@ struct lpfc_hba {
51250 struct timer_list fabric_block_timer;
51251 unsigned long bit_flags;
51252 #define FABRIC_COMANDS_BLOCKED 0
51253- atomic_t num_rsrc_err;
51254- atomic_t num_cmd_success;
51255+ atomic_unchecked_t num_rsrc_err;
51256+ atomic_unchecked_t num_cmd_success;
51257 unsigned long last_rsrc_error_time;
51258 unsigned long last_ramp_down_time;
51259 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51260@@ -916,7 +916,7 @@ struct lpfc_hba {
51261
51262 struct dentry *debug_slow_ring_trc;
51263 struct lpfc_debugfs_trc *slow_ring_trc;
51264- atomic_t slow_ring_trc_cnt;
51265+ atomic_unchecked_t slow_ring_trc_cnt;
51266 /* iDiag debugfs sub-directory */
51267 struct dentry *idiag_root;
51268 struct dentry *idiag_pci_cfg;
51269diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51270index 5633e7d..8272114 100644
51271--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51272+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51273@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51274
51275 #include <linux/debugfs.h>
51276
51277-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51278+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51279 static unsigned long lpfc_debugfs_start_time = 0L;
51280
51281 /* iDiag */
51282@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51283 lpfc_debugfs_enable = 0;
51284
51285 len = 0;
51286- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51287+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51288 (lpfc_debugfs_max_disc_trc - 1);
51289 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51290 dtp = vport->disc_trc + i;
51291@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51292 lpfc_debugfs_enable = 0;
51293
51294 len = 0;
51295- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51296+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51297 (lpfc_debugfs_max_slow_ring_trc - 1);
51298 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51299 dtp = phba->slow_ring_trc + i;
51300@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51301 !vport || !vport->disc_trc)
51302 return;
51303
51304- index = atomic_inc_return(&vport->disc_trc_cnt) &
51305+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51306 (lpfc_debugfs_max_disc_trc - 1);
51307 dtp = vport->disc_trc + index;
51308 dtp->fmt = fmt;
51309 dtp->data1 = data1;
51310 dtp->data2 = data2;
51311 dtp->data3 = data3;
51312- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51313+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51314 dtp->jif = jiffies;
51315 #endif
51316 return;
51317@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51318 !phba || !phba->slow_ring_trc)
51319 return;
51320
51321- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51322+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51323 (lpfc_debugfs_max_slow_ring_trc - 1);
51324 dtp = phba->slow_ring_trc + index;
51325 dtp->fmt = fmt;
51326 dtp->data1 = data1;
51327 dtp->data2 = data2;
51328 dtp->data3 = data3;
51329- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51330+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51331 dtp->jif = jiffies;
51332 #endif
51333 return;
51334@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51335 "slow_ring buffer\n");
51336 goto debug_failed;
51337 }
51338- atomic_set(&phba->slow_ring_trc_cnt, 0);
51339+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51340 memset(phba->slow_ring_trc, 0,
51341 (sizeof(struct lpfc_debugfs_trc) *
51342 lpfc_debugfs_max_slow_ring_trc));
51343@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51344 "buffer\n");
51345 goto debug_failed;
51346 }
51347- atomic_set(&vport->disc_trc_cnt, 0);
51348+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51349
51350 snprintf(name, sizeof(name), "discovery_trace");
51351 vport->debug_disc_trc =
51352diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51353index 0b2c53a..aec2b45 100644
51354--- a/drivers/scsi/lpfc/lpfc_init.c
51355+++ b/drivers/scsi/lpfc/lpfc_init.c
51356@@ -11290,8 +11290,10 @@ lpfc_init(void)
51357 "misc_register returned with status %d", error);
51358
51359 if (lpfc_enable_npiv) {
51360- lpfc_transport_functions.vport_create = lpfc_vport_create;
51361- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51362+ pax_open_kernel();
51363+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51364+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51365+ pax_close_kernel();
51366 }
51367 lpfc_transport_template =
51368 fc_attach_transport(&lpfc_transport_functions);
51369diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51370index 4f9222e..f1850e3 100644
51371--- a/drivers/scsi/lpfc/lpfc_scsi.c
51372+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51373@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51374 unsigned long expires;
51375
51376 spin_lock_irqsave(&phba->hbalock, flags);
51377- atomic_inc(&phba->num_rsrc_err);
51378+ atomic_inc_unchecked(&phba->num_rsrc_err);
51379 phba->last_rsrc_error_time = jiffies;
51380
51381 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51382@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51383 unsigned long num_rsrc_err, num_cmd_success;
51384 int i;
51385
51386- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51387- num_cmd_success = atomic_read(&phba->num_cmd_success);
51388+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51389+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51390
51391 /*
51392 * The error and success command counters are global per
51393@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51394 }
51395 }
51396 lpfc_destroy_vport_work_array(phba, vports);
51397- atomic_set(&phba->num_rsrc_err, 0);
51398- atomic_set(&phba->num_cmd_success, 0);
51399+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51400+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51401 }
51402
51403 /**
51404diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51405index 6a1c036..38e0e8d 100644
51406--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51407+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51408@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51409 {
51410 struct scsi_device *sdev = to_scsi_device(dev);
51411 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51412- static struct _raid_device *raid_device;
51413+ struct _raid_device *raid_device;
51414 unsigned long flags;
51415 Mpi2RaidVolPage0_t vol_pg0;
51416 Mpi2ConfigReply_t mpi_reply;
51417@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51418 {
51419 struct scsi_device *sdev = to_scsi_device(dev);
51420 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51421- static struct _raid_device *raid_device;
51422+ struct _raid_device *raid_device;
51423 unsigned long flags;
51424 Mpi2RaidVolPage0_t vol_pg0;
51425 Mpi2ConfigReply_t mpi_reply;
51426@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51427 Mpi2EventDataIrOperationStatus_t *event_data =
51428 (Mpi2EventDataIrOperationStatus_t *)
51429 fw_event->event_data;
51430- static struct _raid_device *raid_device;
51431+ struct _raid_device *raid_device;
51432 unsigned long flags;
51433 u16 handle;
51434
51435@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51436 u64 sas_address;
51437 struct _sas_device *sas_device;
51438 struct _sas_node *expander_device;
51439- static struct _raid_device *raid_device;
51440+ struct _raid_device *raid_device;
51441 u8 retry_count;
51442 unsigned long flags;
51443
51444diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51445index 8c27b6a..607f56e 100644
51446--- a/drivers/scsi/pmcraid.c
51447+++ b/drivers/scsi/pmcraid.c
51448@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51449 res->scsi_dev = scsi_dev;
51450 scsi_dev->hostdata = res;
51451 res->change_detected = 0;
51452- atomic_set(&res->read_failures, 0);
51453- atomic_set(&res->write_failures, 0);
51454+ atomic_set_unchecked(&res->read_failures, 0);
51455+ atomic_set_unchecked(&res->write_failures, 0);
51456 rc = 0;
51457 }
51458 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51459@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51460
51461 /* If this was a SCSI read/write command keep count of errors */
51462 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51463- atomic_inc(&res->read_failures);
51464+ atomic_inc_unchecked(&res->read_failures);
51465 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51466- atomic_inc(&res->write_failures);
51467+ atomic_inc_unchecked(&res->write_failures);
51468
51469 if (!RES_IS_GSCSI(res->cfg_entry) &&
51470 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51471@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51472 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51473 * hrrq_id assigned here in queuecommand
51474 */
51475- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51476+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51477 pinstance->num_hrrq;
51478 cmd->cmd_done = pmcraid_io_done;
51479
51480@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51481 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51482 * hrrq_id assigned here in queuecommand
51483 */
51484- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51485+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51486 pinstance->num_hrrq;
51487
51488 if (request_size) {
51489@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51490
51491 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51492 /* add resources only after host is added into system */
51493- if (!atomic_read(&pinstance->expose_resources))
51494+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51495 return;
51496
51497 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51498@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51499 init_waitqueue_head(&pinstance->reset_wait_q);
51500
51501 atomic_set(&pinstance->outstanding_cmds, 0);
51502- atomic_set(&pinstance->last_message_id, 0);
51503- atomic_set(&pinstance->expose_resources, 0);
51504+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51505+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51506
51507 INIT_LIST_HEAD(&pinstance->free_res_q);
51508 INIT_LIST_HEAD(&pinstance->used_res_q);
51509@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51510 /* Schedule worker thread to handle CCN and take care of adding and
51511 * removing devices to OS
51512 */
51513- atomic_set(&pinstance->expose_resources, 1);
51514+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51515 schedule_work(&pinstance->worker_q);
51516 return rc;
51517
51518diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51519index e1d150f..6c6df44 100644
51520--- a/drivers/scsi/pmcraid.h
51521+++ b/drivers/scsi/pmcraid.h
51522@@ -748,7 +748,7 @@ struct pmcraid_instance {
51523 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51524
51525 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51526- atomic_t last_message_id;
51527+ atomic_unchecked_t last_message_id;
51528
51529 /* configuration table */
51530 struct pmcraid_config_table *cfg_table;
51531@@ -777,7 +777,7 @@ struct pmcraid_instance {
51532 atomic_t outstanding_cmds;
51533
51534 /* should add/delete resources to mid-layer now ?*/
51535- atomic_t expose_resources;
51536+ atomic_unchecked_t expose_resources;
51537
51538
51539
51540@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51541 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51542 };
51543 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51544- atomic_t read_failures; /* count of failed READ commands */
51545- atomic_t write_failures; /* count of failed WRITE commands */
51546+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51547+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51548
51549 /* To indicate add/delete/modify during CCN */
51550 u8 change_detected;
51551diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51552index 82b92c4..3178171 100644
51553--- a/drivers/scsi/qla2xxx/qla_attr.c
51554+++ b/drivers/scsi/qla2xxx/qla_attr.c
51555@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51556 return 0;
51557 }
51558
51559-struct fc_function_template qla2xxx_transport_functions = {
51560+fc_function_template_no_const qla2xxx_transport_functions = {
51561
51562 .show_host_node_name = 1,
51563 .show_host_port_name = 1,
51564@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51565 .bsg_timeout = qla24xx_bsg_timeout,
51566 };
51567
51568-struct fc_function_template qla2xxx_transport_vport_functions = {
51569+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51570
51571 .show_host_node_name = 1,
51572 .show_host_port_name = 1,
51573diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51574index 7686bfe..4710893 100644
51575--- a/drivers/scsi/qla2xxx/qla_gbl.h
51576+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51577@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51578 struct device_attribute;
51579 extern struct device_attribute *qla2x00_host_attrs[];
51580 struct fc_function_template;
51581-extern struct fc_function_template qla2xxx_transport_functions;
51582-extern struct fc_function_template qla2xxx_transport_vport_functions;
51583+extern fc_function_template_no_const qla2xxx_transport_functions;
51584+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51585 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51586 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51587 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51588diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51589index cce1cbc..5b9f0fe 100644
51590--- a/drivers/scsi/qla2xxx/qla_os.c
51591+++ b/drivers/scsi/qla2xxx/qla_os.c
51592@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51593 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51594 /* Ok, a 64bit DMA mask is applicable. */
51595 ha->flags.enable_64bit_addressing = 1;
51596- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51597- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51598+ pax_open_kernel();
51599+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51600+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51601+ pax_close_kernel();
51602 return;
51603 }
51604 }
51605diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51606index 8f6d0fb..1b21097 100644
51607--- a/drivers/scsi/qla4xxx/ql4_def.h
51608+++ b/drivers/scsi/qla4xxx/ql4_def.h
51609@@ -305,7 +305,7 @@ struct ddb_entry {
51610 * (4000 only) */
51611 atomic_t relogin_timer; /* Max Time to wait for
51612 * relogin to complete */
51613- atomic_t relogin_retry_count; /* Num of times relogin has been
51614+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51615 * retried */
51616 uint32_t default_time2wait; /* Default Min time between
51617 * relogins (+aens) */
51618diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51619index 6d25879..3031a9f 100644
51620--- a/drivers/scsi/qla4xxx/ql4_os.c
51621+++ b/drivers/scsi/qla4xxx/ql4_os.c
51622@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51623 */
51624 if (!iscsi_is_session_online(cls_sess)) {
51625 /* Reset retry relogin timer */
51626- atomic_inc(&ddb_entry->relogin_retry_count);
51627+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51628 DEBUG2(ql4_printk(KERN_INFO, ha,
51629 "%s: index[%d] relogin timed out-retrying"
51630 " relogin (%d), retry (%d)\n", __func__,
51631 ddb_entry->fw_ddb_index,
51632- atomic_read(&ddb_entry->relogin_retry_count),
51633+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51634 ddb_entry->default_time2wait + 4));
51635 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51636 atomic_set(&ddb_entry->retry_relogin_timer,
51637@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51638
51639 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51640 atomic_set(&ddb_entry->relogin_timer, 0);
51641- atomic_set(&ddb_entry->relogin_retry_count, 0);
51642+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51643 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51644 ddb_entry->default_relogin_timeout =
51645 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51646diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51647index 17bb541..85f4508 100644
51648--- a/drivers/scsi/scsi_lib.c
51649+++ b/drivers/scsi/scsi_lib.c
51650@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51651 shost = sdev->host;
51652 scsi_init_cmd_errh(cmd);
51653 cmd->result = DID_NO_CONNECT << 16;
51654- atomic_inc(&cmd->device->iorequest_cnt);
51655+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51656
51657 /*
51658 * SCSI request completion path will do scsi_device_unbusy(),
51659@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
51660
51661 INIT_LIST_HEAD(&cmd->eh_entry);
51662
51663- atomic_inc(&cmd->device->iodone_cnt);
51664+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51665 if (cmd->result)
51666- atomic_inc(&cmd->device->ioerr_cnt);
51667+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51668
51669 disposition = scsi_decide_disposition(cmd);
51670 if (disposition != SUCCESS &&
51671@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51672 struct Scsi_Host *host = cmd->device->host;
51673 int rtn = 0;
51674
51675- atomic_inc(&cmd->device->iorequest_cnt);
51676+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51677
51678 /* check if the device is still usable */
51679 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51680diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51681index 1ac38e7..6acc656 100644
51682--- a/drivers/scsi/scsi_sysfs.c
51683+++ b/drivers/scsi/scsi_sysfs.c
51684@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51685 char *buf) \
51686 { \
51687 struct scsi_device *sdev = to_scsi_device(dev); \
51688- unsigned long long count = atomic_read(&sdev->field); \
51689+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51690 return snprintf(buf, 20, "0x%llx\n", count); \
51691 } \
51692 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51693diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51694index 5d6f348..18778a6b 100644
51695--- a/drivers/scsi/scsi_transport_fc.c
51696+++ b/drivers/scsi/scsi_transport_fc.c
51697@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51698 * Netlink Infrastructure
51699 */
51700
51701-static atomic_t fc_event_seq;
51702+static atomic_unchecked_t fc_event_seq;
51703
51704 /**
51705 * fc_get_event_number - Obtain the next sequential FC event number
51706@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51707 u32
51708 fc_get_event_number(void)
51709 {
51710- return atomic_add_return(1, &fc_event_seq);
51711+ return atomic_add_return_unchecked(1, &fc_event_seq);
51712 }
51713 EXPORT_SYMBOL(fc_get_event_number);
51714
51715@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51716 {
51717 int error;
51718
51719- atomic_set(&fc_event_seq, 0);
51720+ atomic_set_unchecked(&fc_event_seq, 0);
51721
51722 error = transport_class_register(&fc_host_class);
51723 if (error)
51724@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51725 char *cp;
51726
51727 *val = simple_strtoul(buf, &cp, 0);
51728- if ((*cp && (*cp != '\n')) || (*val < 0))
51729+ if (*cp && (*cp != '\n'))
51730 return -EINVAL;
51731 /*
51732 * Check for overflow; dev_loss_tmo is u32
51733diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51734index 67d43e3..8cee73c 100644
51735--- a/drivers/scsi/scsi_transport_iscsi.c
51736+++ b/drivers/scsi/scsi_transport_iscsi.c
51737@@ -79,7 +79,7 @@ struct iscsi_internal {
51738 struct transport_container session_cont;
51739 };
51740
51741-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51742+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51743 static struct workqueue_struct *iscsi_eh_timer_workq;
51744
51745 static DEFINE_IDA(iscsi_sess_ida);
51746@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51747 int err;
51748
51749 ihost = shost->shost_data;
51750- session->sid = atomic_add_return(1, &iscsi_session_nr);
51751+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51752
51753 if (target_id == ISCSI_MAX_TARGET) {
51754 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51755@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51756 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51757 ISCSI_TRANSPORT_VERSION);
51758
51759- atomic_set(&iscsi_session_nr, 0);
51760+ atomic_set_unchecked(&iscsi_session_nr, 0);
51761
51762 err = class_register(&iscsi_transport_class);
51763 if (err)
51764diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51765index ae45bd9..c32a586 100644
51766--- a/drivers/scsi/scsi_transport_srp.c
51767+++ b/drivers/scsi/scsi_transport_srp.c
51768@@ -35,7 +35,7 @@
51769 #include "scsi_priv.h"
51770
51771 struct srp_host_attrs {
51772- atomic_t next_port_id;
51773+ atomic_unchecked_t next_port_id;
51774 };
51775 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51776
51777@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51778 struct Scsi_Host *shost = dev_to_shost(dev);
51779 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51780
51781- atomic_set(&srp_host->next_port_id, 0);
51782+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51783 return 0;
51784 }
51785
51786@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51787 rport_fast_io_fail_timedout);
51788 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51789
51790- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51791+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51792 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51793
51794 transport_setup_device(&rport->dev);
51795diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51796index 05ea0d4..5af8049 100644
51797--- a/drivers/scsi/sd.c
51798+++ b/drivers/scsi/sd.c
51799@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51800 sdkp->disk = gd;
51801 sdkp->index = index;
51802 atomic_set(&sdkp->openers, 0);
51803- atomic_set(&sdkp->device->ioerr_cnt, 0);
51804+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51805
51806 if (!sdp->request_queue->rq_timeout) {
51807 if (sdp->type != TYPE_MOD)
51808diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51809index dbf8e77..0d565c7 100644
51810--- a/drivers/scsi/sg.c
51811+++ b/drivers/scsi/sg.c
51812@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51813 sdp->disk->disk_name,
51814 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51815 NULL,
51816- (char *)arg);
51817+ (char __user *)arg);
51818 case BLKTRACESTART:
51819 return blk_trace_startstop(sdp->device->request_queue, 1);
51820 case BLKTRACESTOP:
51821diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51822index 011a336..fb2b7a0 100644
51823--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51824+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51825@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51826 return i;
51827 }
51828
51829-static struct bin_attribute fuse_bin_attr = {
51830+static bin_attribute_no_const fuse_bin_attr = {
51831 .attr = { .name = "fuse", .mode = S_IRUGO, },
51832 .read = fuse_read,
51833 };
51834diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51835index 66a70e9..f82cea4 100644
51836--- a/drivers/spi/spi.c
51837+++ b/drivers/spi/spi.c
51838@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
51839 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51840
51841 /* portable code must never pass more than 32 bytes */
51842-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51843+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51844
51845 static u8 *buf;
51846
51847diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51848index b41429f..2de5373 100644
51849--- a/drivers/staging/android/timed_output.c
51850+++ b/drivers/staging/android/timed_output.c
51851@@ -25,7 +25,7 @@
51852 #include "timed_output.h"
51853
51854 static struct class *timed_output_class;
51855-static atomic_t device_count;
51856+static atomic_unchecked_t device_count;
51857
51858 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51859 char *buf)
51860@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51861 timed_output_class = class_create(THIS_MODULE, "timed_output");
51862 if (IS_ERR(timed_output_class))
51863 return PTR_ERR(timed_output_class);
51864- atomic_set(&device_count, 0);
51865+ atomic_set_unchecked(&device_count, 0);
51866 timed_output_class->dev_groups = timed_output_groups;
51867 }
51868
51869@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51870 if (ret < 0)
51871 return ret;
51872
51873- tdev->index = atomic_inc_return(&device_count);
51874+ tdev->index = atomic_inc_return_unchecked(&device_count);
51875 tdev->dev = device_create(timed_output_class, NULL,
51876 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51877 if (IS_ERR(tdev->dev))
51878diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51879index f143cb6..6fb8255 100644
51880--- a/drivers/staging/comedi/comedi_fops.c
51881+++ b/drivers/staging/comedi/comedi_fops.c
51882@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51883 }
51884 cfp->last_attached = dev->attached;
51885 cfp->last_detach_count = dev->detach_count;
51886- ACCESS_ONCE(cfp->read_subdev) = read_s;
51887- ACCESS_ONCE(cfp->write_subdev) = write_s;
51888+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51889+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51890 }
51891
51892 static void comedi_file_check(struct file *file)
51893@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51894 !(s_old->async->cmd.flags & CMDF_WRITE))
51895 return -EBUSY;
51896
51897- ACCESS_ONCE(cfp->read_subdev) = s_new;
51898+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51899 return 0;
51900 }
51901
51902@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51903 (s_old->async->cmd.flags & CMDF_WRITE))
51904 return -EBUSY;
51905
51906- ACCESS_ONCE(cfp->write_subdev) = s_new;
51907+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51908 return 0;
51909 }
51910
51911diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51912index 001348c..cfaac8a 100644
51913--- a/drivers/staging/gdm724x/gdm_tty.c
51914+++ b/drivers/staging/gdm724x/gdm_tty.c
51915@@ -44,7 +44,7 @@
51916 #define gdm_tty_send_control(n, r, v, d, l) (\
51917 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51918
51919-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51920+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51921
51922 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
51923 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
51924diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
51925index 503b2d7..c904931 100644
51926--- a/drivers/staging/line6/driver.c
51927+++ b/drivers/staging/line6/driver.c
51928@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51929 {
51930 struct usb_device *usbdev = line6->usbdev;
51931 int ret;
51932- unsigned char len;
51933+ unsigned char *plen;
51934
51935 /* query the serial number: */
51936 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51937@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51938 return ret;
51939 }
51940
51941+ plen = kmalloc(1, GFP_KERNEL);
51942+ if (plen == NULL)
51943+ return -ENOMEM;
51944+
51945 /* Wait for data length. We'll get 0xff until length arrives. */
51946 do {
51947 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51948 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51949 USB_DIR_IN,
51950- 0x0012, 0x0000, &len, 1,
51951+ 0x0012, 0x0000, plen, 1,
51952 LINE6_TIMEOUT * HZ);
51953 if (ret < 0) {
51954 dev_err(line6->ifcdev,
51955 "receive length failed (error %d)\n", ret);
51956+ kfree(plen);
51957 return ret;
51958 }
51959- } while (len == 0xff);
51960+ } while (*plen == 0xff);
51961
51962- if (len != datalen) {
51963+ if (*plen != datalen) {
51964 /* should be equal or something went wrong */
51965 dev_err(line6->ifcdev,
51966 "length mismatch (expected %d, got %d)\n",
51967- (int)datalen, (int)len);
51968+ (int)datalen, (int)*plen);
51969+ kfree(plen);
51970 return -EINVAL;
51971 }
51972+ kfree(plen);
51973
51974 /* receive the result: */
51975 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51976@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51977 {
51978 struct usb_device *usbdev = line6->usbdev;
51979 int ret;
51980- unsigned char status;
51981+ unsigned char *status;
51982
51983 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51984 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
51985@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51986 return ret;
51987 }
51988
51989+ status = kmalloc(1, GFP_KERNEL);
51990+ if (status == NULL)
51991+ return -ENOMEM;
51992+
51993 do {
51994 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
51995 0x67,
51996 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51997 USB_DIR_IN,
51998 0x0012, 0x0000,
51999- &status, 1, LINE6_TIMEOUT * HZ);
52000+ status, 1, LINE6_TIMEOUT * HZ);
52001
52002 if (ret < 0) {
52003 dev_err(line6->ifcdev,
52004 "receiving status failed (error %d)\n", ret);
52005+ kfree(status);
52006 return ret;
52007 }
52008- } while (status == 0xff);
52009+ } while (*status == 0xff);
52010
52011- if (status != 0) {
52012+ if (*status != 0) {
52013 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
52014+ kfree(status);
52015 return -EINVAL;
52016 }
52017
52018+ kfree(status);
52019+
52020 return 0;
52021 }
52022
52023diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
52024index 6943715..0a93632 100644
52025--- a/drivers/staging/line6/toneport.c
52026+++ b/drivers/staging/line6/toneport.c
52027@@ -11,6 +11,7 @@
52028 */
52029
52030 #include <linux/wait.h>
52031+#include <linux/slab.h>
52032 #include <sound/control.h>
52033
52034 #include "audio.h"
52035@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
52036 */
52037 static void toneport_setup(struct usb_line6_toneport *toneport)
52038 {
52039- int ticks;
52040+ int *ticks;
52041 struct usb_line6 *line6 = &toneport->line6;
52042 struct usb_device *usbdev = line6->usbdev;
52043 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
52044
52045+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
52046+ if (ticks == NULL)
52047+ return;
52048+
52049 /* sync time on device with host: */
52050- ticks = (int)get_seconds();
52051- line6_write_data(line6, 0x80c6, &ticks, 4);
52052+ *ticks = (int)get_seconds();
52053+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
52054+
52055+ kfree(ticks);
52056
52057 /* enable device: */
52058 toneport_send_cmd(usbdev, 0x0301, 0x0000);
52059diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52060index 463da07..e791ce9 100644
52061--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52062+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52063@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52064 return 0;
52065 }
52066
52067-sfw_test_client_ops_t brw_test_client;
52068-void brw_init_test_client(void)
52069-{
52070- brw_test_client.tso_init = brw_client_init;
52071- brw_test_client.tso_fini = brw_client_fini;
52072- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52073- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52074+sfw_test_client_ops_t brw_test_client = {
52075+ .tso_init = brw_client_init,
52076+ .tso_fini = brw_client_fini,
52077+ .tso_prep_rpc = brw_client_prep_rpc,
52078+ .tso_done_rpc = brw_client_done_rpc,
52079 };
52080
52081 srpc_service_t brw_test_service;
52082diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52083index cc9d182..8fabce3 100644
52084--- a/drivers/staging/lustre/lnet/selftest/framework.c
52085+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52086@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52087
52088 extern sfw_test_client_ops_t ping_test_client;
52089 extern srpc_service_t ping_test_service;
52090-extern void ping_init_test_client(void);
52091 extern void ping_init_test_service(void);
52092
52093 extern sfw_test_client_ops_t brw_test_client;
52094 extern srpc_service_t brw_test_service;
52095-extern void brw_init_test_client(void);
52096 extern void brw_init_test_service(void);
52097
52098
52099@@ -1675,12 +1673,10 @@ sfw_startup (void)
52100 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52101 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52102
52103- brw_init_test_client();
52104 brw_init_test_service();
52105 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52106 LASSERT (rc == 0);
52107
52108- ping_init_test_client();
52109 ping_init_test_service();
52110 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52111 LASSERT (rc == 0);
52112diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52113index d8c0df6..5041cbb 100644
52114--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52115+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52116@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52117 return 0;
52118 }
52119
52120-sfw_test_client_ops_t ping_test_client;
52121-void ping_init_test_client(void)
52122-{
52123- ping_test_client.tso_init = ping_client_init;
52124- ping_test_client.tso_fini = ping_client_fini;
52125- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52126- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52127-}
52128+sfw_test_client_ops_t ping_test_client = {
52129+ .tso_init = ping_client_init,
52130+ .tso_fini = ping_client_fini,
52131+ .tso_prep_rpc = ping_client_prep_rpc,
52132+ .tso_done_rpc = ping_client_done_rpc,
52133+};
52134
52135 srpc_service_t ping_test_service;
52136 void ping_init_test_service(void)
52137diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52138index 83bc0a9..12ba00a 100644
52139--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52140+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52141@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52142 ldlm_completion_callback lcs_completion;
52143 ldlm_blocking_callback lcs_blocking;
52144 ldlm_glimpse_callback lcs_glimpse;
52145-};
52146+} __no_const;
52147
52148 /* ldlm_lockd.c */
52149 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52150diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52151index 2a88b80..62e7e5f 100644
52152--- a/drivers/staging/lustre/lustre/include/obd.h
52153+++ b/drivers/staging/lustre/lustre/include/obd.h
52154@@ -1362,7 +1362,7 @@ struct md_ops {
52155 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52156 * wrapper function in include/linux/obd_class.h.
52157 */
52158-};
52159+} __no_const;
52160
52161 struct lsm_operations {
52162 void (*lsm_free)(struct lov_stripe_md *);
52163diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52164index a4c252f..b21acac 100644
52165--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52166+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52167@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52168 int added = (mode == LCK_NL);
52169 int overlaps = 0;
52170 int splitted = 0;
52171- const struct ldlm_callback_suite null_cbs = { NULL };
52172+ const struct ldlm_callback_suite null_cbs = { };
52173
52174 CDEBUG(D_DLMTRACE,
52175 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52176diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52177index 83d3f08..b03adad 100644
52178--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52179+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52180@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52181 void __user *buffer, size_t *lenp, loff_t *ppos)
52182 {
52183 int rc, max_delay_cs;
52184- struct ctl_table dummy = *table;
52185+ ctl_table_no_const dummy = *table;
52186 long d;
52187
52188 dummy.data = &max_delay_cs;
52189@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52190 void __user *buffer, size_t *lenp, loff_t *ppos)
52191 {
52192 int rc, min_delay_cs;
52193- struct ctl_table dummy = *table;
52194+ ctl_table_no_const dummy = *table;
52195 long d;
52196
52197 dummy.data = &min_delay_cs;
52198@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52199 void __user *buffer, size_t *lenp, loff_t *ppos)
52200 {
52201 int rc, backoff;
52202- struct ctl_table dummy = *table;
52203+ ctl_table_no_const dummy = *table;
52204
52205 dummy.data = &backoff;
52206 dummy.proc_handler = &proc_dointvec;
52207diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52208index 2c4fc74..b04ca79 100644
52209--- a/drivers/staging/lustre/lustre/libcfs/module.c
52210+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52211@@ -315,11 +315,11 @@ out:
52212
52213
52214 struct cfs_psdev_ops libcfs_psdev_ops = {
52215- libcfs_psdev_open,
52216- libcfs_psdev_release,
52217- NULL,
52218- NULL,
52219- libcfs_ioctl
52220+ .p_open = libcfs_psdev_open,
52221+ .p_close = libcfs_psdev_release,
52222+ .p_read = NULL,
52223+ .p_write = NULL,
52224+ .p_ioctl = libcfs_ioctl
52225 };
52226
52227 extern int insert_proc(void);
52228diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52229index fcbe836..8a7ada4 100644
52230--- a/drivers/staging/octeon/ethernet-rx.c
52231+++ b/drivers/staging/octeon/ethernet-rx.c
52232@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52233 /* Increment RX stats for virtual ports */
52234 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52235 #ifdef CONFIG_64BIT
52236- atomic64_add(1,
52237+ atomic64_add_unchecked(1,
52238 (atomic64_t *)&priv->stats.rx_packets);
52239- atomic64_add(skb->len,
52240+ atomic64_add_unchecked(skb->len,
52241 (atomic64_t *)&priv->stats.rx_bytes);
52242 #else
52243- atomic_add(1,
52244+ atomic_add_unchecked(1,
52245 (atomic_t *)&priv->stats.rx_packets);
52246- atomic_add(skb->len,
52247+ atomic_add_unchecked(skb->len,
52248 (atomic_t *)&priv->stats.rx_bytes);
52249 #endif
52250 }
52251@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52252 dev->name);
52253 */
52254 #ifdef CONFIG_64BIT
52255- atomic64_add(1,
52256+ atomic64_add_unchecked(1,
52257 (atomic64_t *)&priv->stats.rx_dropped);
52258 #else
52259- atomic_add(1,
52260+ atomic_add_unchecked(1,
52261 (atomic_t *)&priv->stats.rx_dropped);
52262 #endif
52263 dev_kfree_skb_irq(skb);
52264diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52265index ee32149..052d1836 100644
52266--- a/drivers/staging/octeon/ethernet.c
52267+++ b/drivers/staging/octeon/ethernet.c
52268@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52269 * since the RX tasklet also increments it.
52270 */
52271 #ifdef CONFIG_64BIT
52272- atomic64_add(rx_status.dropped_packets,
52273- (atomic64_t *)&priv->stats.rx_dropped);
52274+ atomic64_add_unchecked(rx_status.dropped_packets,
52275+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52276 #else
52277- atomic_add(rx_status.dropped_packets,
52278- (atomic_t *)&priv->stats.rx_dropped);
52279+ atomic_add_unchecked(rx_status.dropped_packets,
52280+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52281 #endif
52282 }
52283
52284diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52285index 3b476d8..f522d68 100644
52286--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52287+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52288@@ -225,7 +225,7 @@ struct hal_ops {
52289
52290 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52291 void (*hal_reset_security_engine)(struct adapter *adapter);
52292-};
52293+} __no_const;
52294
52295 enum rt_eeprom_type {
52296 EEPROM_93C46,
52297diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52298index 070cc03..6806e37 100644
52299--- a/drivers/staging/rtl8712/rtl871x_io.h
52300+++ b/drivers/staging/rtl8712/rtl871x_io.h
52301@@ -108,7 +108,7 @@ struct _io_ops {
52302 u8 *pmem);
52303 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52304 u8 *pmem);
52305-};
52306+} __no_const;
52307
52308 struct io_req {
52309 struct list_head list;
52310diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52311index 46dad63..fe4acdc 100644
52312--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52313+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52314@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52315 void (*device_resume)(ulong bus_no, ulong dev_no);
52316 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52317 ulong *max_size);
52318-};
52319+} __no_const;
52320
52321 /* These functions live inside visorchipset, and will be called to indicate
52322 * responses to specific events (by code outside of visorchipset).
52323@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52324 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52325 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52326 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52327-};
52328+} __no_const;
52329
52330 /** Register functions (in the bus driver) to get called by visorchipset
52331 * whenever a bus or device appears for which this service partition is
52332diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52333index 9512af6..045bf5a 100644
52334--- a/drivers/target/sbp/sbp_target.c
52335+++ b/drivers/target/sbp/sbp_target.c
52336@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52337
52338 #define SESSION_MAINTENANCE_INTERVAL HZ
52339
52340-static atomic_t login_id = ATOMIC_INIT(0);
52341+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52342
52343 static void session_maintenance_work(struct work_struct *);
52344 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52345@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52346 login->lun = se_lun;
52347 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52348 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52349- login->login_id = atomic_inc_return(&login_id);
52350+ login->login_id = atomic_inc_return_unchecked(&login_id);
52351
52352 login->tgt_agt = sbp_target_agent_register(login);
52353 if (IS_ERR(login->tgt_agt)) {
52354diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52355index 54da2a4..3dd6f57 100644
52356--- a/drivers/target/target_core_device.c
52357+++ b/drivers/target/target_core_device.c
52358@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52359 spin_lock_init(&dev->se_tmr_lock);
52360 spin_lock_init(&dev->qf_cmd_lock);
52361 sema_init(&dev->caw_sem, 1);
52362- atomic_set(&dev->dev_ordered_id, 0);
52363+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52364 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52365 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52366 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52367diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52368index ac3cbab..f0d1dd2 100644
52369--- a/drivers/target/target_core_transport.c
52370+++ b/drivers/target/target_core_transport.c
52371@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52372 * Used to determine when ORDERED commands should go from
52373 * Dormant to Active status.
52374 */
52375- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52376+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52377 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52378 cmd->se_ordered_id, cmd->sam_task_attr,
52379 dev->transport->name);
52380diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52381index 65a98a9..d93d3a8 100644
52382--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52383+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52384@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52385 platform_set_drvdata(pdev, priv);
52386
52387 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52388- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52389- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52390+ pax_open_kernel();
52391+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52392+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52393+ pax_close_kernel();
52394 }
52395 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52396 priv, &int3400_thermal_ops,
52397diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52398index d717f3d..cae1cc3e 100644
52399--- a/drivers/thermal/of-thermal.c
52400+++ b/drivers/thermal/of-thermal.c
52401@@ -31,6 +31,7 @@
52402 #include <linux/export.h>
52403 #include <linux/string.h>
52404 #include <linux/thermal.h>
52405+#include <linux/mm.h>
52406
52407 #include "thermal_core.h"
52408
52409@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52410 tz->ops = ops;
52411 tz->sensor_data = data;
52412
52413- tzd->ops->get_temp = of_thermal_get_temp;
52414- tzd->ops->get_trend = of_thermal_get_trend;
52415- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52416+ pax_open_kernel();
52417+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52418+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52419+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52420+ pax_close_kernel();
52421 mutex_unlock(&tzd->lock);
52422
52423 return tzd;
52424@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52425 return;
52426
52427 mutex_lock(&tzd->lock);
52428- tzd->ops->get_temp = NULL;
52429- tzd->ops->get_trend = NULL;
52430- tzd->ops->set_emul_temp = NULL;
52431+ pax_open_kernel();
52432+ *(void **)&tzd->ops->get_temp = NULL;
52433+ *(void **)&tzd->ops->get_trend = NULL;
52434+ *(void **)&tzd->ops->set_emul_temp = NULL;
52435+ pax_close_kernel();
52436
52437 tz->ops = NULL;
52438 tz->sensor_data = NULL;
52439diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52440index fd66f57..48e6376 100644
52441--- a/drivers/tty/cyclades.c
52442+++ b/drivers/tty/cyclades.c
52443@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52444 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52445 info->port.count);
52446 #endif
52447- info->port.count++;
52448+ atomic_inc(&info->port.count);
52449 #ifdef CY_DEBUG_COUNT
52450 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52451- current->pid, info->port.count);
52452+ current->pid, atomic_read(&info->port.count));
52453 #endif
52454
52455 /*
52456@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52457 for (j = 0; j < cy_card[i].nports; j++) {
52458 info = &cy_card[i].ports[j];
52459
52460- if (info->port.count) {
52461+ if (atomic_read(&info->port.count)) {
52462 /* XXX is the ldisc num worth this? */
52463 struct tty_struct *tty;
52464 struct tty_ldisc *ld;
52465diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52466index 4fcec1d..5a036f7 100644
52467--- a/drivers/tty/hvc/hvc_console.c
52468+++ b/drivers/tty/hvc/hvc_console.c
52469@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52470
52471 spin_lock_irqsave(&hp->port.lock, flags);
52472 /* Check and then increment for fast path open. */
52473- if (hp->port.count++ > 0) {
52474+ if (atomic_inc_return(&hp->port.count) > 1) {
52475 spin_unlock_irqrestore(&hp->port.lock, flags);
52476 hvc_kick();
52477 return 0;
52478@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52479
52480 spin_lock_irqsave(&hp->port.lock, flags);
52481
52482- if (--hp->port.count == 0) {
52483+ if (atomic_dec_return(&hp->port.count) == 0) {
52484 spin_unlock_irqrestore(&hp->port.lock, flags);
52485 /* We are done with the tty pointer now. */
52486 tty_port_tty_set(&hp->port, NULL);
52487@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52488 */
52489 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52490 } else {
52491- if (hp->port.count < 0)
52492+ if (atomic_read(&hp->port.count) < 0)
52493 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52494- hp->vtermno, hp->port.count);
52495+ hp->vtermno, atomic_read(&hp->port.count));
52496 spin_unlock_irqrestore(&hp->port.lock, flags);
52497 }
52498 }
52499@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52500 * open->hangup case this can be called after the final close so prevent
52501 * that from happening for now.
52502 */
52503- if (hp->port.count <= 0) {
52504+ if (atomic_read(&hp->port.count) <= 0) {
52505 spin_unlock_irqrestore(&hp->port.lock, flags);
52506 return;
52507 }
52508
52509- hp->port.count = 0;
52510+ atomic_set(&hp->port.count, 0);
52511 spin_unlock_irqrestore(&hp->port.lock, flags);
52512 tty_port_tty_set(&hp->port, NULL);
52513
52514@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52515 return -EPIPE;
52516
52517 /* FIXME what's this (unprotected) check for? */
52518- if (hp->port.count <= 0)
52519+ if (atomic_read(&hp->port.count) <= 0)
52520 return -EIO;
52521
52522 spin_lock_irqsave(&hp->lock, flags);
52523diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52524index 81ff7e1..dfb7b71 100644
52525--- a/drivers/tty/hvc/hvcs.c
52526+++ b/drivers/tty/hvc/hvcs.c
52527@@ -83,6 +83,7 @@
52528 #include <asm/hvcserver.h>
52529 #include <asm/uaccess.h>
52530 #include <asm/vio.h>
52531+#include <asm/local.h>
52532
52533 /*
52534 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52535@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52536
52537 spin_lock_irqsave(&hvcsd->lock, flags);
52538
52539- if (hvcsd->port.count > 0) {
52540+ if (atomic_read(&hvcsd->port.count) > 0) {
52541 spin_unlock_irqrestore(&hvcsd->lock, flags);
52542 printk(KERN_INFO "HVCS: vterm state unchanged. "
52543 "The hvcs device node is still in use.\n");
52544@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52545 }
52546 }
52547
52548- hvcsd->port.count = 0;
52549+ atomic_set(&hvcsd->port.count, 0);
52550 hvcsd->port.tty = tty;
52551 tty->driver_data = hvcsd;
52552
52553@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52554 unsigned long flags;
52555
52556 spin_lock_irqsave(&hvcsd->lock, flags);
52557- hvcsd->port.count++;
52558+ atomic_inc(&hvcsd->port.count);
52559 hvcsd->todo_mask |= HVCS_SCHED_READ;
52560 spin_unlock_irqrestore(&hvcsd->lock, flags);
52561
52562@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52563 hvcsd = tty->driver_data;
52564
52565 spin_lock_irqsave(&hvcsd->lock, flags);
52566- if (--hvcsd->port.count == 0) {
52567+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52568
52569 vio_disable_interrupts(hvcsd->vdev);
52570
52571@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52572
52573 free_irq(irq, hvcsd);
52574 return;
52575- } else if (hvcsd->port.count < 0) {
52576+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52577 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52578 " is missmanaged.\n",
52579- hvcsd->vdev->unit_address, hvcsd->port.count);
52580+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52581 }
52582
52583 spin_unlock_irqrestore(&hvcsd->lock, flags);
52584@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52585
52586 spin_lock_irqsave(&hvcsd->lock, flags);
52587 /* Preserve this so that we know how many kref refs to put */
52588- temp_open_count = hvcsd->port.count;
52589+ temp_open_count = atomic_read(&hvcsd->port.count);
52590
52591 /*
52592 * Don't kref put inside the spinlock because the destruction
52593@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52594 tty->driver_data = NULL;
52595 hvcsd->port.tty = NULL;
52596
52597- hvcsd->port.count = 0;
52598+ atomic_set(&hvcsd->port.count, 0);
52599
52600 /* This will drop any buffered data on the floor which is OK in a hangup
52601 * scenario. */
52602@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52603 * the middle of a write operation? This is a crummy place to do this
52604 * but we want to keep it all in the spinlock.
52605 */
52606- if (hvcsd->port.count <= 0) {
52607+ if (atomic_read(&hvcsd->port.count) <= 0) {
52608 spin_unlock_irqrestore(&hvcsd->lock, flags);
52609 return -ENODEV;
52610 }
52611@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52612 {
52613 struct hvcs_struct *hvcsd = tty->driver_data;
52614
52615- if (!hvcsd || hvcsd->port.count <= 0)
52616+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52617 return 0;
52618
52619 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52620diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52621index 4190199..06d5bfa 100644
52622--- a/drivers/tty/hvc/hvsi.c
52623+++ b/drivers/tty/hvc/hvsi.c
52624@@ -85,7 +85,7 @@ struct hvsi_struct {
52625 int n_outbuf;
52626 uint32_t vtermno;
52627 uint32_t virq;
52628- atomic_t seqno; /* HVSI packet sequence number */
52629+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52630 uint16_t mctrl;
52631 uint8_t state; /* HVSI protocol state */
52632 uint8_t flags;
52633@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52634
52635 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52636 packet.hdr.len = sizeof(struct hvsi_query_response);
52637- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52638+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52639 packet.verb = VSV_SEND_VERSION_NUMBER;
52640 packet.u.version = HVSI_VERSION;
52641 packet.query_seqno = query_seqno+1;
52642@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52643
52644 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52645 packet.hdr.len = sizeof(struct hvsi_query);
52646- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52647+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52648 packet.verb = verb;
52649
52650 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52651@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52652 int wrote;
52653
52654 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52655- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52656+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52657 packet.hdr.len = sizeof(struct hvsi_control);
52658 packet.verb = VSV_SET_MODEM_CTL;
52659 packet.mask = HVSI_TSDTR;
52660@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52661 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52662
52663 packet.hdr.type = VS_DATA_PACKET_HEADER;
52664- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52665+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52666 packet.hdr.len = count + sizeof(struct hvsi_header);
52667 memcpy(&packet.data, buf, count);
52668
52669@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52670 struct hvsi_control packet __ALIGNED__;
52671
52672 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52673- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52674+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52675 packet.hdr.len = 6;
52676 packet.verb = VSV_CLOSE_PROTOCOL;
52677
52678@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52679
52680 tty_port_tty_set(&hp->port, tty);
52681 spin_lock_irqsave(&hp->lock, flags);
52682- hp->port.count++;
52683+ atomic_inc(&hp->port.count);
52684 atomic_set(&hp->seqno, 0);
52685 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52686 spin_unlock_irqrestore(&hp->lock, flags);
52687@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52688
52689 spin_lock_irqsave(&hp->lock, flags);
52690
52691- if (--hp->port.count == 0) {
52692+ if (atomic_dec_return(&hp->port.count) == 0) {
52693 tty_port_tty_set(&hp->port, NULL);
52694 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52695
52696@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52697
52698 spin_lock_irqsave(&hp->lock, flags);
52699 }
52700- } else if (hp->port.count < 0)
52701+ } else if (atomic_read(&hp->port.count) < 0)
52702 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52703- hp - hvsi_ports, hp->port.count);
52704+ hp - hvsi_ports, atomic_read(&hp->port.count));
52705
52706 spin_unlock_irqrestore(&hp->lock, flags);
52707 }
52708@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52709 tty_port_tty_set(&hp->port, NULL);
52710
52711 spin_lock_irqsave(&hp->lock, flags);
52712- hp->port.count = 0;
52713+ atomic_set(&hp->port.count, 0);
52714 hp->n_outbuf = 0;
52715 spin_unlock_irqrestore(&hp->lock, flags);
52716 }
52717diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52718index a270f04..7c77b5d 100644
52719--- a/drivers/tty/hvc/hvsi_lib.c
52720+++ b/drivers/tty/hvc/hvsi_lib.c
52721@@ -8,7 +8,7 @@
52722
52723 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52724 {
52725- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52726+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52727
52728 /* Assumes that always succeeds, works in practice */
52729 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52730@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52731
52732 /* Reset state */
52733 pv->established = 0;
52734- atomic_set(&pv->seqno, 0);
52735+ atomic_set_unchecked(&pv->seqno, 0);
52736
52737 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52738
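The hvsi and hvsi_lib hunks switch the packet sequence number from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, the regular atomic_t operations are instrumented to detect overflow and saturate, because a wrapped reference count is a use-after-free primitive; counters that are supposed to wrap, like a free-running protocol seqno, must therefore move to the _unchecked variants. A rough sketch of the type split (hedged: the real definitions live in the patched asm/atomic.h and carry per-arch overflow traps):

    /* Sketch, not the verbatim PaX implementation. */
    typedef struct {
            int counter;
    } atomic_unchecked_t;

    /* Unchecked: plain wrapping increment. */
    static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
            return __sync_add_and_fetch(&v->counter, 1);
    }

    /*
     * Checked (conceptual): the hardened atomic_inc_return() does the
     * same increment but additionally traps, e.g. on x86 by testing
     * the overflow flag after "lock incl", and saturates the counter
     * instead of letting it wrap.
     */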
52739diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52740index 345cebb..d5a1e9e 100644
52741--- a/drivers/tty/ipwireless/tty.c
52742+++ b/drivers/tty/ipwireless/tty.c
52743@@ -28,6 +28,7 @@
52744 #include <linux/tty_driver.h>
52745 #include <linux/tty_flip.h>
52746 #include <linux/uaccess.h>
52747+#include <asm/local.h>
52748
52749 #include "tty.h"
52750 #include "network.h"
52751@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52752 return -ENODEV;
52753
52754 mutex_lock(&tty->ipw_tty_mutex);
52755- if (tty->port.count == 0)
52756+ if (atomic_read(&tty->port.count) == 0)
52757 tty->tx_bytes_queued = 0;
52758
52759- tty->port.count++;
52760+ atomic_inc(&tty->port.count);
52761
52762 tty->port.tty = linux_tty;
52763 linux_tty->driver_data = tty;
52764@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52765
52766 static void do_ipw_close(struct ipw_tty *tty)
52767 {
52768- tty->port.count--;
52769-
52770- if (tty->port.count == 0) {
52771+ if (atomic_dec_return(&tty->port.count) == 0) {
52772 struct tty_struct *linux_tty = tty->port.tty;
52773
52774 if (linux_tty != NULL) {
52775@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52776 return;
52777
52778 mutex_lock(&tty->ipw_tty_mutex);
52779- if (tty->port.count == 0) {
52780+ if (atomic_read(&tty->port.count) == 0) {
52781 mutex_unlock(&tty->ipw_tty_mutex);
52782 return;
52783 }
52784@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52785
52786 mutex_lock(&tty->ipw_tty_mutex);
52787
52788- if (!tty->port.count) {
52789+ if (!atomic_read(&tty->port.count)) {
52790 mutex_unlock(&tty->ipw_tty_mutex);
52791 return;
52792 }
52793@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52794 return -ENODEV;
52795
52796 mutex_lock(&tty->ipw_tty_mutex);
52797- if (!tty->port.count) {
52798+ if (!atomic_read(&tty->port.count)) {
52799 mutex_unlock(&tty->ipw_tty_mutex);
52800 return -EINVAL;
52801 }
52802@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52803 if (!tty)
52804 return -ENODEV;
52805
52806- if (!tty->port.count)
52807+ if (!atomic_read(&tty->port.count))
52808 return -EINVAL;
52809
52810 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52811@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52812 if (!tty)
52813 return 0;
52814
52815- if (!tty->port.count)
52816+ if (!atomic_read(&tty->port.count))
52817 return 0;
52818
52819 return tty->tx_bytes_queued;
52820@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52821 if (!tty)
52822 return -ENODEV;
52823
52824- if (!tty->port.count)
52825+ if (!atomic_read(&tty->port.count))
52826 return -EINVAL;
52827
52828 return get_control_lines(tty);
52829@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52830 if (!tty)
52831 return -ENODEV;
52832
52833- if (!tty->port.count)
52834+ if (!atomic_read(&tty->port.count))
52835 return -EINVAL;
52836
52837 return set_control_lines(tty, set, clear);
52838@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52839 if (!tty)
52840 return -ENODEV;
52841
52842- if (!tty->port.count)
52843+ if (!atomic_read(&tty->port.count))
52844 return -EINVAL;
52845
52846 /* FIXME: Exactly how is the tty object locked here .. */
52847@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52848 * are gone */
52849 mutex_lock(&ttyj->ipw_tty_mutex);
52850 }
52851- while (ttyj->port.count)
52852+ while (atomic_read(&ttyj->port.count))
52853 do_ipw_close(ttyj);
52854 ipwireless_disassociate_network_ttys(network,
52855 ttyj->channel_idx);
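Note the drain loop kept at the end of ipwireless_tty_free(): while (atomic_read(&ttyj->port.count)) do_ipw_close(ttyj); terminates because do_ipw_close() now folds its decrement and zero test into a single atomic_dec_return(), so every iteration visibly lowers the count. The rewritten close path, condensed (demo_ names are placeholders):

    /* Sketch: decrement-then-test collapsed into one step. */
    #include <linux/atomic.h>
    #include <linux/printk.h>

    static void demo_do_close(atomic_t *count)
    {
            /* was: (*count)--; if (*count == 0) ...
             * two accesses with a window in between */
            if (atomic_dec_return(count) == 0)
                    pr_debug("last reference dropped\n");
    }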
52856diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52857index 14c54e0..1efd4f2 100644
52858--- a/drivers/tty/moxa.c
52859+++ b/drivers/tty/moxa.c
52860@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52861 }
52862
52863 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52864- ch->port.count++;
52865+ atomic_inc(&ch->port.count);
52866 tty->driver_data = ch;
52867 tty_port_tty_set(&ch->port, tty);
52868 mutex_lock(&ch->port.mutex);
52869diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52870index c434376..114ce13 100644
52871--- a/drivers/tty/n_gsm.c
52872+++ b/drivers/tty/n_gsm.c
52873@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52874 spin_lock_init(&dlci->lock);
52875 mutex_init(&dlci->mutex);
52876 dlci->fifo = &dlci->_fifo;
52877- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52878+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52879 kfree(dlci);
52880 return NULL;
52881 }
52882@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52883 struct gsm_dlci *dlci = tty->driver_data;
52884 struct tty_port *port = &dlci->port;
52885
52886- port->count++;
52887+ atomic_inc(&port->count);
52888 tty_port_tty_set(port, tty);
52889
52890 dlci->modem_rx = 0;
52891diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52892index 4ddfa60..1b7e112 100644
52893--- a/drivers/tty/n_tty.c
52894+++ b/drivers/tty/n_tty.c
52895@@ -115,7 +115,7 @@ struct n_tty_data {
52896 int minimum_to_wake;
52897
52898 /* consumer-published */
52899- size_t read_tail;
52900+ size_t read_tail __intentional_overflow(-1);
52901 size_t line_start;
52902
52903 /* protected by output lock */
52904@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52905 {
52906 *ops = tty_ldisc_N_TTY;
52907 ops->owner = NULL;
52908- ops->refcount = ops->flags = 0;
52909+ atomic_set(&ops->refcount, 0);
52910+ ops->flags = 0;
52911 }
52912 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
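Two distinct changes in the n_tty hunks: tty_ldisc_ops.refcount becomes an atomic_t (the matching tty_ldisc.c hunks appear further down), and n_tty_data.read_tail gains an __intentional_overflow(-1) annotation. The latter is consumed by the size_overflow gcc plugin bundled with grsecurity, which instruments integer arithmetic and reports unexpected overflow; the attribute exempts values that wrap by design, such as this free-running ring-buffer index. A hedged sketch of how such a marker is typically wired up (the guard macro name here is illustrative, the exact definition lives in the patched compiler headers):

    /* Sketch: the attribute degrades to nothing without the plugin. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    struct demo_ring {
            size_t head;
            size_t tail __intentional_overflow(-1); /* wraps by design */
    };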
52913diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52914index 6e1f150..c3ba598 100644
52915--- a/drivers/tty/pty.c
52916+++ b/drivers/tty/pty.c
52917@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52918 panic("Couldn't register Unix98 pts driver");
52919
52920 /* Now create the /dev/ptmx special device */
52921+ pax_open_kernel();
52922 tty_default_fops(&ptmx_fops);
52923- ptmx_fops.open = ptmx_open;
52924+ *(void **)&ptmx_fops.open = ptmx_open;
52925+ pax_close_kernel();
52926
52927 cdev_init(&ptmx_cdev, &ptmx_fops);
52928 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
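grsecurity's constify plugin moves structures consisting of function pointers (file_operations, kgdb_io, and so on) into read-only memory, so the one legitimate boot-time write to ptmx_fops.open must be bracketed with pax_open_kernel()/pax_close_kernel(), and the assignment goes through a *(void **)& cast to defeat the const qualification; the tty_default_fops() hunk further down performs its whole-struct write via memcpy through a void * for the same reason. A simplified x86 sketch of the bracket (abridged; the real PaX version has per-arch variants and extra bookkeeping):

    /* Sketch: lift CR0.WP so read-only kernel pages accept writes. */
    #include <linux/preempt.h>
    #include <asm/special_insns.h>

    static inline unsigned long demo_open_kernel(void)
    {
            unsigned long cr0;

            preempt_disable();
            cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);   /* writes to RO pages OK */
            return cr0;
    }

    static inline void demo_close_kernel(unsigned long cr0)
    {
            write_cr0(cr0 | X86_CR0_WP);    /* protection restored */
            preempt_enable();
    }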
52929diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
52930index 383c4c7..d408e21 100644
52931--- a/drivers/tty/rocket.c
52932+++ b/drivers/tty/rocket.c
52933@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52934 tty->driver_data = info;
52935 tty_port_tty_set(port, tty);
52936
52937- if (port->count++ == 0) {
52938+ if (atomic_inc_return(&port->count) == 1) {
52939 atomic_inc(&rp_num_ports_open);
52940
52941 #ifdef ROCKET_DEBUG_OPEN
52942@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52943 #endif
52944 }
52945 #ifdef ROCKET_DEBUG_OPEN
52946- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
52947+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
52948 #endif
52949
52950 /*
52951@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
52952 spin_unlock_irqrestore(&info->port.lock, flags);
52953 return;
52954 }
52955- if (info->port.count)
52956+ if (atomic_read(&info->port.count))
52957 atomic_dec(&rp_num_ports_open);
52958 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
52959 spin_unlock_irqrestore(&info->port.lock, flags);
52960diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
52961index aa28209..e08fb85 100644
52962--- a/drivers/tty/serial/ioc4_serial.c
52963+++ b/drivers/tty/serial/ioc4_serial.c
52964@@ -437,7 +437,7 @@ struct ioc4_soft {
52965 } is_intr_info[MAX_IOC4_INTR_ENTS];
52966
52967 /* Number of entries active in the above array */
52968- atomic_t is_num_intrs;
52969+ atomic_unchecked_t is_num_intrs;
52970 } is_intr_type[IOC4_NUM_INTR_TYPES];
52971
52972 /* is_ir_lock must be held while
52973@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
52974 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
52975 || (type == IOC4_OTHER_INTR_TYPE)));
52976
52977- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
52978+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
52979 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
52980
52981 /* Save off the lower level interrupt handler */
52982@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
52983
52984 soft = arg;
52985 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
52986- num_intrs = (int)atomic_read(
52987+ num_intrs = (int)atomic_read_unchecked(
52988 &soft->is_intr_type[intr_type].is_num_intrs);
52989
52990 this_mir = this_ir = pending_intrs(soft, intr_type);
52991diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
52992index 129dc5b..1da5bb8 100644
52993--- a/drivers/tty/serial/kgdb_nmi.c
52994+++ b/drivers/tty/serial/kgdb_nmi.c
52995@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
52996 * I/O utilities that messages sent to the console will automatically
52997 * be displayed on the dbg_io.
52998 */
52999- dbg_io_ops->is_console = true;
53000+ pax_open_kernel();
53001+ *(int *)&dbg_io_ops->is_console = true;
53002+ pax_close_kernel();
53003
53004 return 0;
53005 }
53006diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53007index a260cde..6b2b5ce 100644
53008--- a/drivers/tty/serial/kgdboc.c
53009+++ b/drivers/tty/serial/kgdboc.c
53010@@ -24,8 +24,9 @@
53011 #define MAX_CONFIG_LEN 40
53012
53013 static struct kgdb_io kgdboc_io_ops;
53014+static struct kgdb_io kgdboc_io_ops_console;
53015
53016-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53017+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53018 static int configured = -1;
53019
53020 static char config[MAX_CONFIG_LEN];
53021@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53022 kgdboc_unregister_kbd();
53023 if (configured == 1)
53024 kgdb_unregister_io_module(&kgdboc_io_ops);
53025+ else if (configured == 2)
53026+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53027 }
53028
53029 static int configure_kgdboc(void)
53030@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53031 int err;
53032 char *cptr = config;
53033 struct console *cons;
53034+ int is_console = 0;
53035
53036 err = kgdboc_option_setup(config);
53037 if (err || !strlen(config) || isspace(config[0]))
53038 goto noconfig;
53039
53040 err = -ENODEV;
53041- kgdboc_io_ops.is_console = 0;
53042 kgdb_tty_driver = NULL;
53043
53044 kgdboc_use_kms = 0;
53045@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53046 int idx;
53047 if (cons->device && cons->device(cons, &idx) == p &&
53048 idx == tty_line) {
53049- kgdboc_io_ops.is_console = 1;
53050+ is_console = 1;
53051 break;
53052 }
53053 cons = cons->next;
53054@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53055 kgdb_tty_line = tty_line;
53056
53057 do_register:
53058- err = kgdb_register_io_module(&kgdboc_io_ops);
53059+ if (is_console) {
53060+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53061+ configured = 2;
53062+ } else {
53063+ err = kgdb_register_io_module(&kgdboc_io_ops);
53064+ configured = 1;
53065+ }
53066 if (err)
53067 goto noconfig;
53068
53069@@ -205,8 +214,6 @@ do_register:
53070 if (err)
53071 goto nmi_con_failed;
53072
53073- configured = 1;
53074-
53075 return 0;
53076
53077 nmi_con_failed:
53078@@ -223,7 +230,7 @@ noconfig:
53079 static int __init init_kgdboc(void)
53080 {
53081 /* Already configured? */
53082- if (configured == 1)
53083+ if (configured >= 1)
53084 return 0;
53085
53086 return configure_kgdboc();
53087@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53088 if (config[len - 1] == '\n')
53089 config[len - 1] = '\0';
53090
53091- if (configured == 1)
53092+ if (configured >= 1)
53093 cleanup_kgdboc();
53094
53095 /* Go and configure with the new params. */
53096@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53097 .post_exception = kgdboc_post_exp_handler,
53098 };
53099
53100+static struct kgdb_io kgdboc_io_ops_console = {
53101+ .name = "kgdboc",
53102+ .read_char = kgdboc_get_char,
53103+ .write_char = kgdboc_put_char,
53104+ .pre_exception = kgdboc_pre_exp_handler,
53105+ .post_exception = kgdboc_post_exp_handler,
53106+ .is_console = 1
53107+};
53108+
53109 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53110 /* This is only available if kgdboc is a built in for early debugging */
53111 static int __init kgdboc_early_init(char *opt)
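Because struct kgdb_io is also const-ified, kgdboc can no longer set is_console on its single ops structure at runtime. The patch instead declares a second static instance with .is_console = 1 baked in, registers whichever variant matches the detected console, and records the choice in configured (1 vs. 2) so cleanup_kgdboc() unregisters the right one. The pattern in miniature (demo_ names are placeholders):

    /* Sketch: choose between two const objects instead of
     * mutating a read-only field after the fact. */
    struct demo_io_ops {
            const char *name;
            int is_console;
    };

    static const struct demo_io_ops demo_ops         = { "demo", 0 };
    static const struct demo_io_ops demo_ops_console = { "demo", 1 };

    static const struct demo_io_ops *demo_pick(int on_console)
    {
            return on_console ? &demo_ops_console : &demo_ops;
    }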
53112diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53113index c88b522..e763029 100644
53114--- a/drivers/tty/serial/msm_serial.c
53115+++ b/drivers/tty/serial/msm_serial.c
53116@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53117 .cons = MSM_CONSOLE,
53118 };
53119
53120-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53121+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53122
53123 static const struct of_device_id msm_uartdm_table[] = {
53124 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53125@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53126 line = pdev->id;
53127
53128 if (line < 0)
53129- line = atomic_inc_return(&msm_uart_next_id) - 1;
53130+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53131
53132 if (unlikely(line < 0 || line >= UART_NR))
53133 return -ENXIO;
53134diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53135index 107e807..d4a02fa 100644
53136--- a/drivers/tty/serial/samsung.c
53137+++ b/drivers/tty/serial/samsung.c
53138@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53139 }
53140 }
53141
53142+static int s3c64xx_serial_startup(struct uart_port *port);
53143 static int s3c24xx_serial_startup(struct uart_port *port)
53144 {
53145 struct s3c24xx_uart_port *ourport = to_ourport(port);
53146 int ret;
53147
53148+ /* Startup sequence is different for s3c64xx and higher SoC's */
53149+ if (s3c24xx_serial_has_interrupt_mask(port))
53150+ return s3c64xx_serial_startup(port);
53151+
53152 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53153 port, (unsigned long long)port->mapbase, port->membase);
53154
53155@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53156 /* setup info for port */
53157 port->dev = &platdev->dev;
53158
53159- /* Startup sequence is different for s3c64xx and higher SoC's */
53160- if (s3c24xx_serial_has_interrupt_mask(port))
53161- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53162-
53163 port->uartclk = 1;
53164
53165 if (cfg->uart_flags & UPF_CONS_FLOW) {
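Same motive in samsung.c: the driver used to patch s3c24xx_serial_ops.startup at init time to point at s3c64xx_serial_startup on newer SoCs, which is impossible once the ops table is read-only. The decision moves into s3c24xx_serial_startup() itself as a per-call dispatch, hence the forward declaration added above. Sketch of the dispatch-instead-of-pointer-patching idiom:

    /* Sketch: keep the ops table const; branch inside the one
     * registered entry point. */
    #include <linux/types.h>

    static int demo_late_startup(void);             /* newer SoCs */

    static int demo_startup(bool has_irq_mask)
    {
            if (has_irq_mask)
                    return demo_late_startup();
            /* ... legacy s3c24xx-style path ... */
            return 0;
    }

    static int demo_late_startup(void)
    {
            /* ... s3c64xx-style path ... */
            return 0;
    }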
53166diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53167index 984605b..e538330 100644
53168--- a/drivers/tty/serial/serial_core.c
53169+++ b/drivers/tty/serial/serial_core.c
53170@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53171 state = drv->state + tty->index;
53172 port = &state->port;
53173 spin_lock_irq(&port->lock);
53174- --port->count;
53175+ atomic_dec(&port->count);
53176 spin_unlock_irq(&port->lock);
53177 return;
53178 }
53179@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53180
53181 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53182
53183- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53184+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53185 return;
53186
53187 /*
53188@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53189 uart_flush_buffer(tty);
53190 uart_shutdown(tty, state);
53191 spin_lock_irqsave(&port->lock, flags);
53192- port->count = 0;
53193+ atomic_set(&port->count, 0);
53194 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53195 spin_unlock_irqrestore(&port->lock, flags);
53196 tty_port_tty_set(port, NULL);
53197@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53198 pr_debug("uart_open(%d) called\n", line);
53199
53200 spin_lock_irq(&port->lock);
53201- ++port->count;
53202+ atomic_inc(&port->count);
53203 spin_unlock_irq(&port->lock);
53204
53205 /*
53206diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53207index b799170..87dafd5 100644
53208--- a/drivers/tty/synclink.c
53209+++ b/drivers/tty/synclink.c
53210@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53211
53212 if (debug_level >= DEBUG_LEVEL_INFO)
53213 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53214- __FILE__,__LINE__, info->device_name, info->port.count);
53215+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53216
53217 if (tty_port_close_start(&info->port, tty, filp) == 0)
53218 goto cleanup;
53219@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53220 cleanup:
53221 if (debug_level >= DEBUG_LEVEL_INFO)
53222 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53223- tty->driver->name, info->port.count);
53224+ tty->driver->name, atomic_read(&info->port.count));
53225
53226 } /* end of mgsl_close() */
53227
53228@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53229
53230 mgsl_flush_buffer(tty);
53231 shutdown(info);
53232-
53233- info->port.count = 0;
53234+
53235+ atomic_set(&info->port.count, 0);
53236 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53237 info->port.tty = NULL;
53238
53239@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53240
53241 if (debug_level >= DEBUG_LEVEL_INFO)
53242 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53243- __FILE__,__LINE__, tty->driver->name, port->count );
53244+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53245
53246 spin_lock_irqsave(&info->irq_spinlock, flags);
53247- port->count--;
53248+ atomic_dec(&port->count);
53249 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53250 port->blocked_open++;
53251
53252@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53253
53254 if (debug_level >= DEBUG_LEVEL_INFO)
53255 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53256- __FILE__,__LINE__, tty->driver->name, port->count );
53257+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53258
53259 tty_unlock(tty);
53260 schedule();
53261@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53262
53263 /* FIXME: Racy on hangup during close wait */
53264 if (!tty_hung_up_p(filp))
53265- port->count++;
53266+ atomic_inc(&port->count);
53267 port->blocked_open--;
53268
53269 if (debug_level >= DEBUG_LEVEL_INFO)
53270 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53271- __FILE__,__LINE__, tty->driver->name, port->count );
53272+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53273
53274 if (!retval)
53275 port->flags |= ASYNC_NORMAL_ACTIVE;
53276@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53277
53278 if (debug_level >= DEBUG_LEVEL_INFO)
53279 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53280- __FILE__,__LINE__,tty->driver->name, info->port.count);
53281+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53282
53283 /* If port is closing, signal caller to try again */
53284 if (info->port.flags & ASYNC_CLOSING){
53285@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53286 spin_unlock_irqrestore(&info->netlock, flags);
53287 goto cleanup;
53288 }
53289- info->port.count++;
53290+ atomic_inc(&info->port.count);
53291 spin_unlock_irqrestore(&info->netlock, flags);
53292
53293- if (info->port.count == 1) {
53294+ if (atomic_read(&info->port.count) == 1) {
53295 /* 1st open on this device, init hardware */
53296 retval = startup(info);
53297 if (retval < 0)
53298@@ -3442,8 +3442,8 @@ cleanup:
53299 if (retval) {
53300 if (tty->count == 1)
53301 info->port.tty = NULL; /* tty layer will release tty struct */
53302- if(info->port.count)
53303- info->port.count--;
53304+ if (atomic_read(&info->port.count))
53305+ atomic_dec(&info->port.count);
53306 }
53307
53308 return retval;
53309@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53310 unsigned short new_crctype;
53311
53312 /* return error if TTY interface open */
53313- if (info->port.count)
53314+ if (atomic_read(&info->port.count))
53315 return -EBUSY;
53316
53317 switch (encoding)
53318@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53319
53320 /* arbitrate between network and tty opens */
53321 spin_lock_irqsave(&info->netlock, flags);
53322- if (info->port.count != 0 || info->netcount != 0) {
53323+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53324 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53325 spin_unlock_irqrestore(&info->netlock, flags);
53326 return -EBUSY;
53327@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53328 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53329
53330 /* return error if TTY interface open */
53331- if (info->port.count)
53332+ if (atomic_read(&info->port.count))
53333 return -EBUSY;
53334
53335 if (cmd != SIOCWANDEV)
53336diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53337index 0e8c39b..e0cb171 100644
53338--- a/drivers/tty/synclink_gt.c
53339+++ b/drivers/tty/synclink_gt.c
53340@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53341 tty->driver_data = info;
53342 info->port.tty = tty;
53343
53344- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53345+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53346
53347 /* If port is closing, signal caller to try again */
53348 if (info->port.flags & ASYNC_CLOSING){
53349@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53350 mutex_unlock(&info->port.mutex);
53351 goto cleanup;
53352 }
53353- info->port.count++;
53354+ atomic_inc(&info->port.count);
53355 spin_unlock_irqrestore(&info->netlock, flags);
53356
53357- if (info->port.count == 1) {
53358+ if (atomic_read(&info->port.count) == 1) {
53359 /* 1st open on this device, init hardware */
53360 retval = startup(info);
53361 if (retval < 0) {
53362@@ -715,8 +715,8 @@ cleanup:
53363 if (retval) {
53364 if (tty->count == 1)
53365 info->port.tty = NULL; /* tty layer will release tty struct */
53366- if(info->port.count)
53367- info->port.count--;
53368+ if(atomic_read(&info->port.count))
53369+ atomic_dec(&info->port.count);
53370 }
53371
53372 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53373@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53374
53375 if (sanity_check(info, tty->name, "close"))
53376 return;
53377- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53378+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53379
53380 if (tty_port_close_start(&info->port, tty, filp) == 0)
53381 goto cleanup;
53382@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53383 tty_port_close_end(&info->port, tty);
53384 info->port.tty = NULL;
53385 cleanup:
53386- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53387+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53388 }
53389
53390 static void hangup(struct tty_struct *tty)
53391@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53392 shutdown(info);
53393
53394 spin_lock_irqsave(&info->port.lock, flags);
53395- info->port.count = 0;
53396+ atomic_set(&info->port.count, 0);
53397 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53398 info->port.tty = NULL;
53399 spin_unlock_irqrestore(&info->port.lock, flags);
53400@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53401 unsigned short new_crctype;
53402
53403 /* return error if TTY interface open */
53404- if (info->port.count)
53405+ if (atomic_read(&info->port.count))
53406 return -EBUSY;
53407
53408 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53409@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53410
53411 /* arbitrate between network and tty opens */
53412 spin_lock_irqsave(&info->netlock, flags);
53413- if (info->port.count != 0 || info->netcount != 0) {
53414+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53415 DBGINFO(("%s hdlc_open busy\n", dev->name));
53416 spin_unlock_irqrestore(&info->netlock, flags);
53417 return -EBUSY;
53418@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53419 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53420
53421 /* return error if TTY interface open */
53422- if (info->port.count)
53423+ if (atomic_read(&info->port.count))
53424 return -EBUSY;
53425
53426 if (cmd != SIOCWANDEV)
53427@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53428 if (port == NULL)
53429 continue;
53430 spin_lock(&port->lock);
53431- if ((port->port.count || port->netcount) &&
53432+ if ((atomic_read(&port->port.count) || port->netcount) &&
53433 port->pending_bh && !port->bh_running &&
53434 !port->bh_requested) {
53435 DBGISR(("%s bh queued\n", port->device_name));
53436@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53437 add_wait_queue(&port->open_wait, &wait);
53438
53439 spin_lock_irqsave(&info->lock, flags);
53440- port->count--;
53441+ atomic_dec(&port->count);
53442 spin_unlock_irqrestore(&info->lock, flags);
53443 port->blocked_open++;
53444
53445@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53446 remove_wait_queue(&port->open_wait, &wait);
53447
53448 if (!tty_hung_up_p(filp))
53449- port->count++;
53450+ atomic_inc(&port->count);
53451 port->blocked_open--;
53452
53453 if (!retval)
53454diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53455index c3f9091..abe4601 100644
53456--- a/drivers/tty/synclinkmp.c
53457+++ b/drivers/tty/synclinkmp.c
53458@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53459
53460 if (debug_level >= DEBUG_LEVEL_INFO)
53461 printk("%s(%d):%s open(), old ref count = %d\n",
53462- __FILE__,__LINE__,tty->driver->name, info->port.count);
53463+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53464
53465 /* If port is closing, signal caller to try again */
53466 if (info->port.flags & ASYNC_CLOSING){
53467@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53468 spin_unlock_irqrestore(&info->netlock, flags);
53469 goto cleanup;
53470 }
53471- info->port.count++;
53472+ atomic_inc(&info->port.count);
53473 spin_unlock_irqrestore(&info->netlock, flags);
53474
53475- if (info->port.count == 1) {
53476+ if (atomic_read(&info->port.count) == 1) {
53477 /* 1st open on this device, init hardware */
53478 retval = startup(info);
53479 if (retval < 0)
53480@@ -796,8 +796,8 @@ cleanup:
53481 if (retval) {
53482 if (tty->count == 1)
53483 info->port.tty = NULL; /* tty layer will release tty struct */
53484- if(info->port.count)
53485- info->port.count--;
53486+ if(atomic_read(&info->port.count))
53487+ atomic_dec(&info->port.count);
53488 }
53489
53490 return retval;
53491@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53492
53493 if (debug_level >= DEBUG_LEVEL_INFO)
53494 printk("%s(%d):%s close() entry, count=%d\n",
53495- __FILE__,__LINE__, info->device_name, info->port.count);
53496+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53497
53498 if (tty_port_close_start(&info->port, tty, filp) == 0)
53499 goto cleanup;
53500@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53501 cleanup:
53502 if (debug_level >= DEBUG_LEVEL_INFO)
53503 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53504- tty->driver->name, info->port.count);
53505+ tty->driver->name, atomic_read(&info->port.count));
53506 }
53507
53508 /* Called by tty_hangup() when a hangup is signaled.
53509@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53510 shutdown(info);
53511
53512 spin_lock_irqsave(&info->port.lock, flags);
53513- info->port.count = 0;
53514+ atomic_set(&info->port.count, 0);
53515 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53516 info->port.tty = NULL;
53517 spin_unlock_irqrestore(&info->port.lock, flags);
53518@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53519 unsigned short new_crctype;
53520
53521 /* return error if TTY interface open */
53522- if (info->port.count)
53523+ if (atomic_read(&info->port.count))
53524 return -EBUSY;
53525
53526 switch (encoding)
53527@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53528
53529 /* arbitrate between network and tty opens */
53530 spin_lock_irqsave(&info->netlock, flags);
53531- if (info->port.count != 0 || info->netcount != 0) {
53532+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53533 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53534 spin_unlock_irqrestore(&info->netlock, flags);
53535 return -EBUSY;
53536@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53537 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53538
53539 /* return error if TTY interface open */
53540- if (info->port.count)
53541+ if (atomic_read(&info->port.count))
53542 return -EBUSY;
53543
53544 if (cmd != SIOCWANDEV)
53545@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53546 * do not request bottom half processing if the
53547 * device is not open in a normal mode.
53548 */
53549- if ( port && (port->port.count || port->netcount) &&
53550+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53551 port->pending_bh && !port->bh_running &&
53552 !port->bh_requested ) {
53553 if ( debug_level >= DEBUG_LEVEL_ISR )
53554@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53555
53556 if (debug_level >= DEBUG_LEVEL_INFO)
53557 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53558- __FILE__,__LINE__, tty->driver->name, port->count );
53559+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53560
53561 spin_lock_irqsave(&info->lock, flags);
53562- port->count--;
53563+ atomic_dec(&port->count);
53564 spin_unlock_irqrestore(&info->lock, flags);
53565 port->blocked_open++;
53566
53567@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53568
53569 if (debug_level >= DEBUG_LEVEL_INFO)
53570 printk("%s(%d):%s block_til_ready() count=%d\n",
53571- __FILE__,__LINE__, tty->driver->name, port->count );
53572+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53573
53574 tty_unlock(tty);
53575 schedule();
53576@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53577 set_current_state(TASK_RUNNING);
53578 remove_wait_queue(&port->open_wait, &wait);
53579 if (!tty_hung_up_p(filp))
53580- port->count++;
53581+ atomic_inc(&port->count);
53582 port->blocked_open--;
53583
53584 if (debug_level >= DEBUG_LEVEL_INFO)
53585 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53586- __FILE__,__LINE__, tty->driver->name, port->count );
53587+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53588
53589 if (!retval)
53590 port->flags |= ASYNC_NORMAL_ACTIVE;
53591diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53592index 42bad18..447d7a2 100644
53593--- a/drivers/tty/sysrq.c
53594+++ b/drivers/tty/sysrq.c
53595@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53596 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53597 size_t count, loff_t *ppos)
53598 {
53599- if (count) {
53600+ if (count && capable(CAP_SYS_ADMIN)) {
53601 char c;
53602
53603 if (get_user(c, buf))
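This sysrq hunk gates /proc/sysrq-trigger behind capable(CAP_SYS_ADMIN): write access to the proc file alone (say, through an inherited file descriptor) is no longer enough to invoke SAK, crashdump, or reboot handlers. Unprivileged writes are silently swallowed rather than rejected, mirroring the patched write_sysrq_trigger(). The guard, sketched:

    /* Sketch of the capability gate. */
    #include <linux/capability.h>
    #include <linux/uaccess.h>

    static ssize_t demo_trigger_write(const char __user *buf,
                                      size_t count)
    {
            if (count && capable(CAP_SYS_ADMIN)) {
                    char c;

                    if (get_user(c, buf))
                            return -EFAULT;
                    /* __handle_sysrq(c, false); */
            }
            return count;   /* unprivileged writes are ignored */
    }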
53604diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53605index 2bb4dfc..a7f6e86 100644
53606--- a/drivers/tty/tty_io.c
53607+++ b/drivers/tty/tty_io.c
53608@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53609
53610 void tty_default_fops(struct file_operations *fops)
53611 {
53612- *fops = tty_fops;
53613+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53614 }
53615
53616 /*
53617diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53618index 3737f55..7cef448 100644
53619--- a/drivers/tty/tty_ldisc.c
53620+++ b/drivers/tty/tty_ldisc.c
53621@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53622 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53623 tty_ldiscs[disc] = new_ldisc;
53624 new_ldisc->num = disc;
53625- new_ldisc->refcount = 0;
53626+ atomic_set(&new_ldisc->refcount, 0);
53627 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53628
53629 return ret;
53630@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53631 return -EINVAL;
53632
53633 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53634- if (tty_ldiscs[disc]->refcount)
53635+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53636 ret = -EBUSY;
53637 else
53638 tty_ldiscs[disc] = NULL;
53639@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53640 if (ldops) {
53641 ret = ERR_PTR(-EAGAIN);
53642 if (try_module_get(ldops->owner)) {
53643- ldops->refcount++;
53644+ atomic_inc(&ldops->refcount);
53645 ret = ldops;
53646 }
53647 }
53648@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53649 unsigned long flags;
53650
53651 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53652- ldops->refcount--;
53653+ atomic_dec(&ldops->refcount);
53654 module_put(ldops->owner);
53655 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53656 }
53657diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53658index 40b31835..94d92ae 100644
53659--- a/drivers/tty/tty_port.c
53660+++ b/drivers/tty/tty_port.c
53661@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53662 unsigned long flags;
53663
53664 spin_lock_irqsave(&port->lock, flags);
53665- port->count = 0;
53666+ atomic_set(&port->count, 0);
53667 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53668 tty = port->tty;
53669 if (tty)
53670@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53671
53672 /* The port lock protects the port counts */
53673 spin_lock_irqsave(&port->lock, flags);
53674- port->count--;
53675+ atomic_dec(&port->count);
53676 port->blocked_open++;
53677 spin_unlock_irqrestore(&port->lock, flags);
53678
53679@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53680 we must not mess that up further */
53681 spin_lock_irqsave(&port->lock, flags);
53682 if (!tty_hung_up_p(filp))
53683- port->count++;
53684+ atomic_inc(&port->count);
53685 port->blocked_open--;
53686 if (retval == 0)
53687 port->flags |= ASYNC_NORMAL_ACTIVE;
53688@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53689 return 0;
53690
53691 spin_lock_irqsave(&port->lock, flags);
53692- if (tty->count == 1 && port->count != 1) {
53693+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53694 printk(KERN_WARNING
53695 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53696- port->count);
53697- port->count = 1;
53698+ atomic_read(&port->count));
53699+ atomic_set(&port->count, 1);
53700 }
53701- if (--port->count < 0) {
53702+ if (atomic_dec_return(&port->count) < 0) {
53703 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53704- port->count);
53705- port->count = 0;
53706+ atomic_read(&port->count));
53707+ atomic_set(&port->count, 0);
53708 }
53709
53710- if (port->count) {
53711+ if (atomic_read(&port->count)) {
53712 spin_unlock_irqrestore(&port->lock, flags);
53713 return 0;
53714 }
53715@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53716 struct file *filp)
53717 {
53718 spin_lock_irq(&port->lock);
53719- ++port->count;
53720+ atomic_inc(&port->count);
53721 spin_unlock_irq(&port->lock);
53722 tty_port_tty_set(port, tty);
53723
53724diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53725index 8a89f6e..50b32af 100644
53726--- a/drivers/tty/vt/keyboard.c
53727+++ b/drivers/tty/vt/keyboard.c
53728@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53729 kbd->kbdmode == VC_OFF) &&
53730 value != KVAL(K_SAK))
53731 return; /* SAK is allowed even in raw mode */
53732+
53733+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53734+ {
53735+ void *func = fn_handler[value];
53736+ if (func == fn_show_state || func == fn_show_ptregs ||
53737+ func == fn_show_mem)
53738+ return;
53739+ }
53740+#endif
53741+
53742 fn_handler[value](vc);
53743 }
53744
53745@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53746 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53747 return -EFAULT;
53748
53749- if (!capable(CAP_SYS_TTY_CONFIG))
53750- perm = 0;
53751-
53752 switch (cmd) {
53753 case KDGKBENT:
53754 /* Ensure another thread doesn't free it under us */
53755@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53756 spin_unlock_irqrestore(&kbd_event_lock, flags);
53757 return put_user(val, &user_kbe->kb_value);
53758 case KDSKBENT:
53759+ if (!capable(CAP_SYS_TTY_CONFIG))
53760+ perm = 0;
53761+
53762 if (!perm)
53763 return -EPERM;
53764 if (!i && v == K_NOSUCHMAP) {
53765@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53766 int i, j, k;
53767 int ret;
53768
53769- if (!capable(CAP_SYS_TTY_CONFIG))
53770- perm = 0;
53771-
53772 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53773 if (!kbs) {
53774 ret = -ENOMEM;
53775@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53776 kfree(kbs);
53777 return ((p && *p) ? -EOVERFLOW : 0);
53778 case KDSKBSENT:
53779+ if (!capable(CAP_SYS_TTY_CONFIG))
53780+ perm = 0;
53781+
53782 if (!perm) {
53783 ret = -EPERM;
53784 goto reterr;
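Two tweaks to the VT keyboard code. First, under the GRKERNSEC_PROC options the fn_show_state/fn_show_ptregs/fn_show_mem key handlers are suppressed, since they dump task, register, and memory state to the console for anyone at the keyboard. Second, the blanket capable(CAP_SYS_TTY_CONFIG) test is pushed down from the top of the keymap ioctls into the mutating KDSKBENT/KDSKBSENT branches, the only places where the resulting perm value is actually consulted, making the privilege requirement explicit at the point of enforcement. Sketch of the relocated check (case values stand in for the real ioctl numbers):

    /* Sketch: demand the capability only where perm is enforced. */
    #include <linux/capability.h>
    #include <linux/errno.h>

    static int demo_keymap_ioctl(unsigned int cmd, int perm)
    {
            switch (cmd) {
            case 0: /* stands in for KDGKBENT: read-only */
                    return 0;
            case 1: /* stands in for KDSKBENT: mutating */
                    if (!capable(CAP_SYS_TTY_CONFIG))
                            perm = 0;
                    return perm ? 0 : -EPERM;
            }
            return -ENOIOCTLCMD;
    }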
53785diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53786index 6276f13..84f2449 100644
53787--- a/drivers/uio/uio.c
53788+++ b/drivers/uio/uio.c
53789@@ -25,6 +25,7 @@
53790 #include <linux/kobject.h>
53791 #include <linux/cdev.h>
53792 #include <linux/uio_driver.h>
53793+#include <asm/local.h>
53794
53795 #define UIO_MAX_DEVICES (1U << MINORBITS)
53796
53797@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53798 struct device_attribute *attr, char *buf)
53799 {
53800 struct uio_device *idev = dev_get_drvdata(dev);
53801- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53802+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53803 }
53804 static DEVICE_ATTR_RO(event);
53805
53806@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53807 {
53808 struct uio_device *idev = info->uio_dev;
53809
53810- atomic_inc(&idev->event);
53811+ atomic_inc_unchecked(&idev->event);
53812 wake_up_interruptible(&idev->wait);
53813 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53814 }
53815@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53816 }
53817
53818 listener->dev = idev;
53819- listener->event_count = atomic_read(&idev->event);
53820+ listener->event_count = atomic_read_unchecked(&idev->event);
53821 filep->private_data = listener;
53822
53823 if (idev->info->open) {
53824@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53825 return -EIO;
53826
53827 poll_wait(filep, &idev->wait, wait);
53828- if (listener->event_count != atomic_read(&idev->event))
53829+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53830 return POLLIN | POLLRDNORM;
53831 return 0;
53832 }
53833@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53834 do {
53835 set_current_state(TASK_INTERRUPTIBLE);
53836
53837- event_count = atomic_read(&idev->event);
53838+ event_count = atomic_read_unchecked(&idev->event);
53839 if (event_count != listener->event_count) {
53840 if (copy_to_user(buf, &event_count, count))
53841 retval = -EFAULT;
53842@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53843 static int uio_find_mem_index(struct vm_area_struct *vma)
53844 {
53845 struct uio_device *idev = vma->vm_private_data;
53846+ unsigned long size;
53847
53848 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53849- if (idev->info->mem[vma->vm_pgoff].size == 0)
53850+ size = idev->info->mem[vma->vm_pgoff].size;
53851+ if (size == 0)
53852+ return -1;
53853+ if (vma->vm_end - vma->vm_start > size)
53854 return -1;
53855 return (int)vma->vm_pgoff;
53856 }
53857@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53858 idev->owner = owner;
53859 idev->info = info;
53860 init_waitqueue_head(&idev->wait);
53861- atomic_set(&idev->event, 0);
53862+ atomic_set_unchecked(&idev->event, 0);
53863
53864 ret = uio_get_minor(idev);
53865 if (ret)
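Besides moving the event counter to the unchecked atomic type, the uio hunk carries a genuine bounds fix: uio_find_mem_index() now rejects mappings whose requested span exceeds the backing region, so userspace can no longer mmap past the end of a UIO memory area. The added check in isolation (sketch):

    /* Sketch: validate the requested length against the region. */
    #include <linux/mm.h>
    #include <linux/uio_driver.h>

    static int demo_find_mem_index(struct vm_area_struct *vma,
                                   unsigned long region_size)
    {
            if (vma->vm_pgoff >= MAX_UIO_MAPS)
                    return -1;
            if (region_size == 0)
                    return -1;
            if (vma->vm_end - vma->vm_start > region_size)
                    return -1;      /* would map past the region */
            return (int)vma->vm_pgoff;
    }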
53866diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53867index 813d4d3..a71934f 100644
53868--- a/drivers/usb/atm/cxacru.c
53869+++ b/drivers/usb/atm/cxacru.c
53870@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53871 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53872 if (ret < 2)
53873 return -EINVAL;
53874- if (index < 0 || index > 0x7f)
53875+ if (index > 0x7f)
53876 return -EINVAL;
53877 pos += tmp;
53878
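The cxacru change drops a dead comparison: index is filled in by a %x conversion, which takes an unsigned int, so assuming the variable is declared accordingly the index < 0 arm can never be true and the range check reduces to the upper bound alone (the kind of always-false test the hardened build's warnings flag). Sketch:

    /* Sketch: with an unsigned index, only the upper bound
     * can ever fail. */
    unsigned int index;

    if (sscanf(buf, "%x", &index) != 1)
            return -EINVAL;
    if (index > 0x7f)               /* full validity check */
            return -EINVAL;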
53879diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53880index dada014..1d0d517 100644
53881--- a/drivers/usb/atm/usbatm.c
53882+++ b/drivers/usb/atm/usbatm.c
53883@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53884 if (printk_ratelimit())
53885 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53886 __func__, vpi, vci);
53887- atomic_inc(&vcc->stats->rx_err);
53888+ atomic_inc_unchecked(&vcc->stats->rx_err);
53889 return;
53890 }
53891
53892@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53893 if (length > ATM_MAX_AAL5_PDU) {
53894 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53895 __func__, length, vcc);
53896- atomic_inc(&vcc->stats->rx_err);
53897+ atomic_inc_unchecked(&vcc->stats->rx_err);
53898 goto out;
53899 }
53900
53901@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53902 if (sarb->len < pdu_length) {
53903 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53904 __func__, pdu_length, sarb->len, vcc);
53905- atomic_inc(&vcc->stats->rx_err);
53906+ atomic_inc_unchecked(&vcc->stats->rx_err);
53907 goto out;
53908 }
53909
53910 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53911 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53912 __func__, vcc);
53913- atomic_inc(&vcc->stats->rx_err);
53914+ atomic_inc_unchecked(&vcc->stats->rx_err);
53915 goto out;
53916 }
53917
53918@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53919 if (printk_ratelimit())
53920 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53921 __func__, length);
53922- atomic_inc(&vcc->stats->rx_drop);
53923+ atomic_inc_unchecked(&vcc->stats->rx_drop);
53924 goto out;
53925 }
53926
53927@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53928
53929 vcc->push(vcc, skb);
53930
53931- atomic_inc(&vcc->stats->rx);
53932+ atomic_inc_unchecked(&vcc->stats->rx);
53933 out:
53934 skb_trim(sarb, 0);
53935 }
53936@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
53937 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
53938
53939 usbatm_pop(vcc, skb);
53940- atomic_inc(&vcc->stats->tx);
53941+ atomic_inc_unchecked(&vcc->stats->tx);
53942
53943 skb = skb_dequeue(&instance->sndqueue);
53944 }
53945@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
53946 if (!left--)
53947 return sprintf(page,
53948 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
53949- atomic_read(&atm_dev->stats.aal5.tx),
53950- atomic_read(&atm_dev->stats.aal5.tx_err),
53951- atomic_read(&atm_dev->stats.aal5.rx),
53952- atomic_read(&atm_dev->stats.aal5.rx_err),
53953- atomic_read(&atm_dev->stats.aal5.rx_drop));
53954+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
53955+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
53956+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
53957+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
53958+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
53959
53960 if (!left--) {
53961 if (instance->disconnected)
53962diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
53963index 2a3bbdf..91d72cf 100644
53964--- a/drivers/usb/core/devices.c
53965+++ b/drivers/usb/core/devices.c
53966@@ -126,7 +126,7 @@ static const char format_endpt[] =
53967 * time it gets called.
53968 */
53969 static struct device_connect_event {
53970- atomic_t count;
53971+ atomic_unchecked_t count;
53972 wait_queue_head_t wait;
53973 } device_event = {
53974 .count = ATOMIC_INIT(1),
53975@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
53976
53977 void usbfs_conn_disc_event(void)
53978 {
53979- atomic_add(2, &device_event.count);
53980+ atomic_add_unchecked(2, &device_event.count);
53981 wake_up(&device_event.wait);
53982 }
53983
53984@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
53985
53986 poll_wait(file, &device_event.wait, wait);
53987
53988- event_count = atomic_read(&device_event.count);
53989+ event_count = atomic_read_unchecked(&device_event.count);
53990 if (file->f_version != event_count) {
53991 file->f_version = event_count;
53992 return POLLIN | POLLRDNORM;
53993diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
53994index e500243..401300f 100644
53995--- a/drivers/usb/core/devio.c
53996+++ b/drivers/usb/core/devio.c
53997@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53998 struct usb_dev_state *ps = file->private_data;
53999 struct usb_device *dev = ps->dev;
54000 ssize_t ret = 0;
54001- unsigned len;
54002+ size_t len;
54003 loff_t pos;
54004 int i;
54005
54006@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54007 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54008 struct usb_config_descriptor *config =
54009 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54010- unsigned int length = le16_to_cpu(config->wTotalLength);
54011+ size_t length = le16_to_cpu(config->wTotalLength);
54012
54013 if (*ppos < pos + length) {
54014
54015 /* The descriptor may claim to be longer than it
54016 * really is. Here is the actual allocated length. */
54017- unsigned alloclen =
54018+ size_t alloclen =
54019 le16_to_cpu(dev->config[i].desc.wTotalLength);
54020
54021- len = length - (*ppos - pos);
54022+ len = length + pos - *ppos;
54023 if (len > nbytes)
54024 len = nbytes;
54025
54026 /* Simply don't write (skip over) unallocated parts */
54027 if (alloclen > (*ppos - pos)) {
54028- alloclen -= (*ppos - pos);
54029+ alloclen = alloclen + pos - *ppos;
54030 if (copy_to_user(buf,
54031 dev->rawdescriptors[i] + (*ppos - pos),
54032 min(len, alloclen))) {
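usbdev_read() widens its byte counts from unsigned int to size_t and reassociates the offset arithmetic: length - (*ppos - pos) becomes length + pos - *ppos, and likewise for alloclen. Algebraically the two forms agree; the rewritten one avoids materializing the (*ppos - pos) difference, which can be momentarily negative, as a standalone intermediate, which appears to be exactly the kind of value the size_overflow instrumentation objects to when it flows into unsigned arithmetic. A worked miniature (plain C, illustrative values):

    /* Sketch: both forms yield 260 here, but only the first
     * forms a negative intermediate. */
    long long pos = 100, ppos = 40; /* stands in for loff_t */
    size_t length = 200;

    size_t a = length - (ppos - pos);  /* (ppos - pos) == -60 */
    size_t b = length + pos - ppos;    /* left to right, no
                                          negative temporary */
    /* a == b == 260 */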
54033diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54034index 45a915c..09f9735 100644
54035--- a/drivers/usb/core/hcd.c
54036+++ b/drivers/usb/core/hcd.c
54037@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54038 */
54039 usb_get_urb(urb);
54040 atomic_inc(&urb->use_count);
54041- atomic_inc(&urb->dev->urbnum);
54042+ atomic_inc_unchecked(&urb->dev->urbnum);
54043 usbmon_urb_submit(&hcd->self, urb);
54044
54045 /* NOTE requirements on root-hub callers (usbfs and the hub
54046@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54047 urb->hcpriv = NULL;
54048 INIT_LIST_HEAD(&urb->urb_list);
54049 atomic_dec(&urb->use_count);
54050- atomic_dec(&urb->dev->urbnum);
54051+ atomic_dec_unchecked(&urb->dev->urbnum);
54052 if (atomic_read(&urb->reject))
54053 wake_up(&usb_kill_urb_queue);
54054 usb_put_urb(urb);
54055diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54056index b4bfa3a..008f926 100644
54057--- a/drivers/usb/core/hub.c
54058+++ b/drivers/usb/core/hub.c
54059@@ -26,6 +26,7 @@
54060 #include <linux/mutex.h>
54061 #include <linux/random.h>
54062 #include <linux/pm_qos.h>
54063+#include <linux/grsecurity.h>
54064
54065 #include <asm/uaccess.h>
54066 #include <asm/byteorder.h>
54067@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54068 goto done;
54069 return;
54070 }
54071+
54072+ if (gr_handle_new_usb())
54073+ goto done;
54074+
54075 if (hub_is_superspeed(hub->hdev))
54076 unit_load = 150;
54077 else
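The gr_handle_new_usb() hook in hub_port_connect() implements GRKERNSEC_DENYUSB: once the kernel.grsecurity.deny_new_usb sysctl is enabled, newly attached USB devices fail enumeration, a defense against hostile devices plugged into an unattended machine. Assumed shape of the hook (hedged; the real implementation lives elsewhere in the grsecurity tree and also logs the rejection):

    /* Sketch of the deny-new-USB hook; names are assumptions. */
    extern int grsec_deny_new_usb;          /* sysctl-controlled */

    int gr_handle_new_usb(void)
    {
    #ifdef CONFIG_GRKERNSEC_DENYUSB
            if (grsec_deny_new_usb)
                    return 1;               /* caller abandons the port */
    #endif
            return 0;
    }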
54078diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54079index f368d20..0c30ac5 100644
54080--- a/drivers/usb/core/message.c
54081+++ b/drivers/usb/core/message.c
54082@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54083 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54084 * error number.
54085 */
54086-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54087+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54088 __u8 requesttype, __u16 value, __u16 index, void *data,
54089 __u16 size, int timeout)
54090 {
54091@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54092 * If successful, 0. Otherwise a negative error number. The number of actual
54093 * bytes transferred will be stored in the @actual_length parameter.
54094 */
54095-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54096+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54097 void *data, int len, int *actual_length, int timeout)
54098 {
54099 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54100@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54101 * bytes transferred will be stored in the @actual_length parameter.
54102 *
54103 */
54104-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54105+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54106 void *data, int len, int *actual_length, int timeout)
54107 {
54108 struct urb *urb;
54109diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54110index d269738..7340cd7 100644
54111--- a/drivers/usb/core/sysfs.c
54112+++ b/drivers/usb/core/sysfs.c
54113@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54114 struct usb_device *udev;
54115
54116 udev = to_usb_device(dev);
54117- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54118+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54119 }
54120 static DEVICE_ATTR_RO(urbnum);
54121
54122diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54123index b1fb9ae..4224885 100644
54124--- a/drivers/usb/core/usb.c
54125+++ b/drivers/usb/core/usb.c
54126@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54127 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54128 dev->state = USB_STATE_ATTACHED;
54129 dev->lpm_disable_count = 1;
54130- atomic_set(&dev->urbnum, 0);
54131+ atomic_set_unchecked(&dev->urbnum, 0);
54132
54133 INIT_LIST_HEAD(&dev->ep0.urb_list);
54134 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54135diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54136index 8cfc319..4868255 100644
54137--- a/drivers/usb/early/ehci-dbgp.c
54138+++ b/drivers/usb/early/ehci-dbgp.c
54139@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54140
54141 #ifdef CONFIG_KGDB
54142 static struct kgdb_io kgdbdbgp_io_ops;
54143-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54144+static struct kgdb_io kgdbdbgp_io_ops_console;
54145+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54146 #else
54147 #define dbgp_kgdb_mode (0)
54148 #endif
54149@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54150 .write_char = kgdbdbgp_write_char,
54151 };
54152
54153+static struct kgdb_io kgdbdbgp_io_ops_console = {
54154+ .name = "kgdbdbgp",
54155+ .read_char = kgdbdbgp_read_char,
54156+ .write_char = kgdbdbgp_write_char,
54157+ .is_console = 1
54158+};
54159+
54160 static int kgdbdbgp_wait_time;
54161
54162 static int __init kgdbdbgp_parse_config(char *str)
54163@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54164 ptr++;
54165 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54166 }
54167- kgdb_register_io_module(&kgdbdbgp_io_ops);
54168- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54169+ if (early_dbgp_console.index != -1)
54170+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54171+ else
54172+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54173
54174 return 0;
54175 }
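
The rework above is driven by constification: a registered struct kgdb_io may live in read-only memory, so the old runtime store to kgdbdbgp_io_ops.is_console would fault. Instead, one static instance per configuration is declared and the right one is picked at registration time. A toy illustration of the pattern:

#include <stdio.h>

struct io_ops {
	const char *name;
	int is_console;
};

/* One read-only instance per configuration, selected up front. */
static const struct io_ops ops_plain   = { "kgdbdbgp", 0 };
static const struct io_ops ops_console = { "kgdbdbgp", 1 };

static const struct io_ops *registered;

static void register_io(int have_console)
{
	registered = have_console ? &ops_console : &ops_plain;
}

int main(void)
{
	register_io(1);
	printf("%s console=%d\n", registered->name, registered->is_console);
	return 0;
}
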
54176diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54177index e971584..03495ab 100644
54178--- a/drivers/usb/gadget/function/f_uac1.c
54179+++ b/drivers/usb/gadget/function/f_uac1.c
54180@@ -14,6 +14,7 @@
54181 #include <linux/module.h>
54182 #include <linux/device.h>
54183 #include <linux/atomic.h>
54184+#include <linux/module.h>
54185
54186 #include "u_uac1.h"
54187
54188diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54189index 491082a..dfd7d17 100644
54190--- a/drivers/usb/gadget/function/u_serial.c
54191+++ b/drivers/usb/gadget/function/u_serial.c
54192@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54193 spin_lock_irq(&port->port_lock);
54194
54195 /* already open? Great. */
54196- if (port->port.count) {
54197+ if (atomic_read(&port->port.count)) {
54198 status = 0;
54199- port->port.count++;
54200+ atomic_inc(&port->port.count);
54201
54202 /* currently opening/closing? wait ... */
54203 } else if (port->openclose) {
54204@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54205 tty->driver_data = port;
54206 port->port.tty = tty;
54207
54208- port->port.count = 1;
54209+ atomic_set(&port->port.count, 1);
54210 port->openclose = false;
54211
54212 /* if connected, start the I/O stream */
54213@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54214
54215 spin_lock_irq(&port->port_lock);
54216
54217- if (port->port.count != 1) {
54218- if (port->port.count == 0)
54219+ if (atomic_read(&port->port.count) != 1) {
54220+ if (atomic_read(&port->port.count) == 0)
54221 WARN_ON(1);
54222 else
54223- --port->port.count;
54224+ atomic_dec(&port->port.count);
54225 goto exit;
54226 }
54227
54228@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54229 * and sleep if necessary
54230 */
54231 port->openclose = true;
54232- port->port.count = 0;
54233+ atomic_set(&port->port.count, 0);
54234
54235 gser = port->port_usb;
54236 if (gser && gser->disconnect)
54237@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54238 int cond;
54239
54240 spin_lock_irq(&port->port_lock);
54241- cond = (port->port.count == 0) && !port->openclose;
54242+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54243 spin_unlock_irq(&port->port_lock);
54244 return cond;
54245 }
54246@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54247 /* if it's already open, start I/O ... and notify the serial
54248 * protocol about open/close status (connect/disconnect).
54249 */
54250- if (port->port.count) {
54251+ if (atomic_read(&port->port.count)) {
54252 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54253 gs_start_io(port);
54254 if (gser->connect)
54255@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54256
54257 port->port_usb = NULL;
54258 gser->ioport = NULL;
54259- if (port->port.count > 0 || port->openclose) {
54260+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54261 wake_up_interruptible(&port->drain_wait);
54262 if (port->port.tty)
54263 tty_hangup(port->port.tty);
54264@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54265
54266 /* finally, free any unused/unusable I/O buffers */
54267 spin_lock_irqsave(&port->port_lock, flags);
54268- if (port->port.count == 0 && !port->openclose)
54269+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54270 gs_buf_free(&port->port_write_buf);
54271 gs_free_requests(gser->out, &port->read_pool, NULL);
54272 gs_free_requests(gser->out, &port->read_queue, NULL);
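
port.count was a plain int updated under the port spinlock but also inspected from other TTY paths; moving it to atomic operations makes every access tear-free and, with PAX_REFCOUNT, overflow-trapping. A userspace sketch of the resulting open/close shape, using C11 atomics in place of the kernel's atomic_t (in the driver these calls still run under port_lock):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;

static void port_open(void)
{
	if (atomic_load(&count))		/* already open? */
		atomic_fetch_add(&count, 1);
	else
		atomic_store(&count, 1);	/* first opener */
}

static void port_close(void)
{
	if (atomic_load(&count) != 1)
		atomic_fetch_sub(&count, 1);	/* not the last user */
	else
		atomic_store(&count, 0);	/* last close */
}

int main(void)
{
	port_open();
	port_open();
	port_close();
	port_close();
	printf("count=%d\n", atomic_load(&count));
	return 0;
}
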
54273diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54274index 53842a1..2bef3b6 100644
54275--- a/drivers/usb/gadget/function/u_uac1.c
54276+++ b/drivers/usb/gadget/function/u_uac1.c
54277@@ -17,6 +17,7 @@
54278 #include <linux/ctype.h>
54279 #include <linux/random.h>
54280 #include <linux/syscalls.h>
54281+#include <linux/module.h>
54282
54283 #include "u_uac1.h"
54284
54285diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54286index 118edb7..7a6415f 100644
54287--- a/drivers/usb/host/ehci-hub.c
54288+++ b/drivers/usb/host/ehci-hub.c
54289@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54290 urb->transfer_flags = URB_DIR_IN;
54291 usb_get_urb(urb);
54292 atomic_inc(&urb->use_count);
54293- atomic_inc(&urb->dev->urbnum);
54294+ atomic_inc_unchecked(&urb->dev->urbnum);
54295 urb->setup_dma = dma_map_single(
54296 hcd->self.controller,
54297 urb->setup_packet,
54298@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54299 urb->status = -EINPROGRESS;
54300 usb_get_urb(urb);
54301 atomic_inc(&urb->use_count);
54302- atomic_inc(&urb->dev->urbnum);
54303+ atomic_inc_unchecked(&urb->dev->urbnum);
54304 retval = submit_single_step_set_feature(hcd, urb, 0);
54305 if (!retval && !wait_for_completion_timeout(&done,
54306 msecs_to_jiffies(2000))) {
54307diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54308index 1db0626..4948782 100644
54309--- a/drivers/usb/host/hwa-hc.c
54310+++ b/drivers/usb/host/hwa-hc.c
54311@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54312 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54313 struct wahc *wa = &hwahc->wa;
54314 struct device *dev = &wa->usb_iface->dev;
54315- u8 mas_le[UWB_NUM_MAS/8];
54316+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54317+
54318+ if (mas_le == NULL)
54319+ return -ENOMEM;
54320
54321 /* Set the stream index */
54322 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54323@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54324 WUSB_REQ_SET_WUSB_MAS,
54325 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54326 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54327- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54328+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54329 if (result < 0)
54330 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54331 out:
54332+ kfree(mas_le);
54333+
54334 return result;
54335 }
54336
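
Two fixes share this hunk: mas_le was an on-stack array passed to usb_control_msg(), and USB transfer buffers must be DMA-able, i.e. heap-allocated rather than on the stack; and the transfer length was a hard-coded 32 rather than the allocation's own size expression. A userspace analog of the corrected shape (do_io stands in for the control transfer; the UWB_NUM_MAS value is assumed):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define UWB_NUM_MAS 256			/* assumed value, sketch only */

static int do_io(void *buf, size_t len) { memset(buf, 0xff, len); return 0; }

static int set_mas_allocation(void)
{
	unsigned char *mas_le = malloc(UWB_NUM_MAS / 8);
	int result;

	if (mas_le == NULL)
		return -ENOMEM;
	/* length derives from the same expression as the allocation */
	result = do_io(mas_le, UWB_NUM_MAS / 8);
	free(mas_le);
	return result;
}

int main(void) { return set_mas_allocation() ? 1 : 0; }
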
54337diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54338index b3d245e..99549ed 100644
54339--- a/drivers/usb/misc/appledisplay.c
54340+++ b/drivers/usb/misc/appledisplay.c
54341@@ -84,7 +84,7 @@ struct appledisplay {
54342 struct mutex sysfslock; /* concurrent read and write */
54343 };
54344
54345-static atomic_t count_displays = ATOMIC_INIT(0);
54346+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54347 static struct workqueue_struct *wq;
54348
54349 static void appledisplay_complete(struct urb *urb)
54350@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54351
54352 /* Register backlight device */
54353 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54354- atomic_inc_return(&count_displays) - 1);
54355+ atomic_inc_return_unchecked(&count_displays) - 1);
54356 memset(&props, 0, sizeof(struct backlight_properties));
54357 props.type = BACKLIGHT_RAW;
54358 props.max_brightness = 0xff;
54359diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54360index 29fa1c3..a57b08e 100644
54361--- a/drivers/usb/serial/console.c
54362+++ b/drivers/usb/serial/console.c
54363@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54364
54365 info->port = port;
54366
54367- ++port->port.count;
54368+ atomic_inc(&port->port.count);
54369 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54370 if (serial->type->set_termios) {
54371 /*
54372@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54373 }
54374 /* Now that any required fake tty operations are completed restore
54375 * the tty port count */
54376- --port->port.count;
54377+ atomic_dec(&port->port.count);
54378 /* The console is special in terms of closing the device so
54379 * indicate this port is now acting as a system console. */
54380 port->port.console = 1;
54381@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54382 put_tty:
54383 tty_kref_put(tty);
54384 reset_open_count:
54385- port->port.count = 0;
54386+ atomic_set(&port->port.count, 0);
54387 usb_autopm_put_interface(serial->interface);
54388 error_get_interface:
54389 usb_serial_put(serial);
54390@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54391 static void usb_console_write(struct console *co,
54392 const char *buf, unsigned count)
54393 {
54394- static struct usbcons_info *info = &usbcons_info;
54395+ struct usbcons_info *info = &usbcons_info;
54396 struct usb_serial_port *port = info->port;
54397 struct usb_serial *serial;
54398 int retval = -ENODEV;
54399diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54400index 307e339..6aa97cb 100644
54401--- a/drivers/usb/storage/usb.h
54402+++ b/drivers/usb/storage/usb.h
54403@@ -63,7 +63,7 @@ struct us_unusual_dev {
54404 __u8 useProtocol;
54405 __u8 useTransport;
54406 int (*initFunction)(struct us_data *);
54407-};
54408+} __do_const;
54409
54410
54411 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54412diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54413index a863a98..d272795 100644
54414--- a/drivers/usb/usbip/vhci.h
54415+++ b/drivers/usb/usbip/vhci.h
54416@@ -83,7 +83,7 @@ struct vhci_hcd {
54417 unsigned resuming:1;
54418 unsigned long re_timeout;
54419
54420- atomic_t seqnum;
54421+ atomic_unchecked_t seqnum;
54422
54423 /*
54424 * NOTE:
54425diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54426index 1ae9d40..c62604b 100644
54427--- a/drivers/usb/usbip/vhci_hcd.c
54428+++ b/drivers/usb/usbip/vhci_hcd.c
54429@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54430
54431 spin_lock(&vdev->priv_lock);
54432
54433- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54434+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54435 if (priv->seqnum == 0xffff)
54436 dev_info(&urb->dev->dev, "seqnum max\n");
54437
54438@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54439 return -ENOMEM;
54440 }
54441
54442- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54443+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54444 if (unlink->seqnum == 0xffff)
54445 pr_info("seqnum max\n");
54446
54447@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54448 vdev->rhport = rhport;
54449 }
54450
54451- atomic_set(&vhci->seqnum, 0);
54452+ atomic_set_unchecked(&vhci->seqnum, 0);
54453 spin_lock_init(&vhci->lock);
54454
54455 hcd->power_budget = 0; /* no limit */
54456diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54457index 00e4a54..d676f85 100644
54458--- a/drivers/usb/usbip/vhci_rx.c
54459+++ b/drivers/usb/usbip/vhci_rx.c
54460@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54461 if (!urb) {
54462 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54463 pr_info("max seqnum %d\n",
54464- atomic_read(&the_controller->seqnum));
54465+ atomic_read_unchecked(&the_controller->seqnum));
54466 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54467 return;
54468 }
54469diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54470index edc7267..9f65ce2 100644
54471--- a/drivers/usb/wusbcore/wa-hc.h
54472+++ b/drivers/usb/wusbcore/wa-hc.h
54473@@ -240,7 +240,7 @@ struct wahc {
54474 spinlock_t xfer_list_lock;
54475 struct work_struct xfer_enqueue_work;
54476 struct work_struct xfer_error_work;
54477- atomic_t xfer_id_count;
54478+ atomic_unchecked_t xfer_id_count;
54479
54480 kernel_ulong_t quirks;
54481 };
54482@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54483 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54484 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54485 wa->dto_in_use = 0;
54486- atomic_set(&wa->xfer_id_count, 1);
54487+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54488 /* init the buf in URBs */
54489 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54490 usb_init_urb(&(wa->buf_in_urbs[index]));
54491diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54492index 69af4fd..da390d7 100644
54493--- a/drivers/usb/wusbcore/wa-xfer.c
54494+++ b/drivers/usb/wusbcore/wa-xfer.c
54495@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54496 */
54497 static void wa_xfer_id_init(struct wa_xfer *xfer)
54498 {
54499- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54500+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54501 }
54502
54503 /* Return the xfer's ID. */
54504diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54505index f018d8d..ccab63f 100644
54506--- a/drivers/vfio/vfio.c
54507+++ b/drivers/vfio/vfio.c
54508@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54509 return 0;
54510
54511 /* TODO Prevent device auto probing */
54512- WARN("Device %s added to live group %d!\n", dev_name(dev),
54513+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54514 iommu_group_id(group->iommu_group));
54515
54516 return 0;
54517diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54518index 9484d56..d415d69 100644
54519--- a/drivers/vhost/net.c
54520+++ b/drivers/vhost/net.c
54521@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54522 break;
54523 }
54524 /* TODO: Should check and handle checksum. */
54525-
54526- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54527 if (likely(mergeable) &&
54528- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54529+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54530 offsetof(typeof(hdr), num_buffers),
54531 sizeof hdr.num_buffers)) {
54532 vq_err(vq, "Failed num_buffers write");
54533diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54534index 3bb02c6..a01ff38 100644
54535--- a/drivers/vhost/vringh.c
54536+++ b/drivers/vhost/vringh.c
54537@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54538 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54539 {
54540 __virtio16 v = 0;
54541- int rc = get_user(v, (__force __virtio16 __user *)p);
54542+ int rc = get_user(v, (__force_user __virtio16 *)p);
54543 *val = vringh16_to_cpu(vrh, v);
54544 return rc;
54545 }
54546@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54547 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54548 {
54549 __virtio16 v = cpu_to_vringh16(vrh, val);
54550- return put_user(v, (__force __virtio16 __user *)p);
54551+ return put_user(v, (__force_user __virtio16 *)p);
54552 }
54553
54554 static inline int copydesc_user(void *dst, const void *src, size_t len)
54555 {
54556- return copy_from_user(dst, (__force void __user *)src, len) ?
54557+ return copy_from_user(dst, (void __force_user *)src, len) ?
54558 -EFAULT : 0;
54559 }
54560
54561@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54562 const struct vring_used_elem *src,
54563 unsigned int num)
54564 {
54565- return copy_to_user((__force void __user *)dst, src,
54566+ return copy_to_user((void __force_user *)dst, src,
54567 sizeof(*dst) * num) ? -EFAULT : 0;
54568 }
54569
54570 static inline int xfer_from_user(void *src, void *dst, size_t len)
54571 {
54572- return copy_from_user(dst, (__force void __user *)src, len) ?
54573+ return copy_from_user(dst, (void __force_user *)src, len) ?
54574 -EFAULT : 0;
54575 }
54576
54577 static inline int xfer_to_user(void *dst, void *src, size_t len)
54578 {
54579- return copy_to_user((__force void __user *)dst, src, len) ?
54580+ return copy_to_user((void __force_user *)dst, src, len) ?
54581 -EFAULT : 0;
54582 }
54583
54584@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54585 vrh->last_used_idx = 0;
54586 vrh->vring.num = num;
54587 /* vring expects kernel addresses, but only used via accessors. */
54588- vrh->vring.desc = (__force struct vring_desc *)desc;
54589- vrh->vring.avail = (__force struct vring_avail *)avail;
54590- vrh->vring.used = (__force struct vring_used *)used;
54591+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54592+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54593+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54594 return 0;
54595 }
54596 EXPORT_SYMBOL(vringh_init_user);
54597@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54598
54599 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54600 {
54601- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54602+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54603 return 0;
54604 }
54605
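
The cast churn in vringh.c is about sparse address spaces: under the stricter checking grsecurity enables, a bare __force cast between user and kernel pointers no longer suffices, so __force_user and __force_kernel name the target space explicitly (ACCESS_ONCE_RW is the analogous writable variant once ACCESS_ONCE is reserved for reads). Roughly how such annotations are declared; the fallback expansions below are assumptions modeled on compiler.h:

#ifdef __CHECKER__			/* sparse */
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
# define __force_user	__force __user
# define __force_kernel	__force		/* back to the kernel space */
#else
# define __user
# define __force
# define __force_user
# define __force_kernel
#endif

/* A pointer only ever dereferenced via accessors can be stored after
 * an explicit, named cast: */
static void *store_user_ptr(void __user *p)
{
	return (void __force_kernel *)p;
}
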
54606diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54607index 84a110a..96312c3 100644
54608--- a/drivers/video/backlight/kb3886_bl.c
54609+++ b/drivers/video/backlight/kb3886_bl.c
54610@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54611 static unsigned long kb3886bl_flags;
54612 #define KB3886BL_SUSPENDED 0x01
54613
54614-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54615+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54616 {
54617 .ident = "Sahara Touch-iT",
54618 .matches = {
54619diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54620index 1b0b233..6f34c2c 100644
54621--- a/drivers/video/fbdev/arcfb.c
54622+++ b/drivers/video/fbdev/arcfb.c
54623@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54624 return -ENOSPC;
54625
54626 err = 0;
54627- if ((count + p) > fbmemlength) {
54628+ if (count > (fbmemlength - p)) {
54629 count = fbmemlength - p;
54630 err = -ENOSPC;
54631 }
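
The arcfb change is a textbook overflow-safe bounds check: with unsigned operands, count + p can wrap past zero and wrongly pass the old comparison, while count > fbmemlength - p cannot once p <= fbmemlength is known (the preceding -ENOSPC test ensures it). In isolation:

#include <stdio.h>

/* Returns the number of bytes that may be written at offset p. */
static unsigned long writable(unsigned long count, unsigned long p,
			      unsigned long len)
{
	if (p > len)
		return 0;
	if (count > len - p)	/* safe: len - p cannot underflow here */
		count = len - p;
	return count;
}

int main(void)
{
	/* count + p wraps to 15 and would pass the old "> len" test */
	printf("%lu\n", writable(~0UL, 16, 4096));	/* prints 4080 */
	return 0;
}
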
54632diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54633index aedf2fb..47c9aca 100644
54634--- a/drivers/video/fbdev/aty/aty128fb.c
54635+++ b/drivers/video/fbdev/aty/aty128fb.c
54636@@ -149,7 +149,7 @@ enum {
54637 };
54638
54639 /* Must match above enum */
54640-static char * const r128_family[] = {
54641+static const char * const r128_family[] = {
54642 "AGP",
54643 "PCI",
54644 "PRO AGP",
54645diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54646index 37ec09b..98f8862 100644
54647--- a/drivers/video/fbdev/aty/atyfb_base.c
54648+++ b/drivers/video/fbdev/aty/atyfb_base.c
54649@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54650 par->accel_flags = var->accel_flags; /* hack */
54651
54652 if (var->accel_flags) {
54653- info->fbops->fb_sync = atyfb_sync;
54654+ pax_open_kernel();
54655+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54656+ pax_close_kernel();
54657 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54658 } else {
54659- info->fbops->fb_sync = NULL;
54660+ pax_open_kernel();
54661+ *(void **)&info->fbops->fb_sync = NULL;
54662+ pax_close_kernel();
54663 info->flags |= FBINFO_HWACCEL_DISABLED;
54664 }
54665
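
This hunk and several fbdev hunks below share one idiom: with KERNEXEC and constification, struct fb_ops is read-only, so a legitimate runtime override must bracket the store with pax_open_kernel()/pax_close_kernel(), which lift kernel write protection (on x86 by toggling CR0.WP), and cast through *(void **)& to get past the const qualifier. The shape of the idiom, with the protection toggles stubbed out for a plain build:

struct ops_like {
	int (*fb_sync)(void *info);
};

static void pax_open_kernel(void)  { /* kernel: make rodata writable */ }
static void pax_close_kernel(void) { /* kernel: restore protection   */ }

static int my_sync(void *info) { (void)info; return 0; }

static void enable_accel(struct ops_like *ops)
{
	pax_open_kernel();
	/* data-pointer cast sidesteps the const-qualified member */
	*(void **)&ops->fb_sync = (void *)my_sync;
	pax_close_kernel();
}

int main(void)
{
	static struct ops_like ops;	/* read-only in the kernel case */

	enable_accel(&ops);
	return ops.fb_sync(0);
}
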
54666diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54667index 2fa0317..4983f2a 100644
54668--- a/drivers/video/fbdev/aty/mach64_cursor.c
54669+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54670@@ -8,6 +8,7 @@
54671 #include "../core/fb_draw.h"
54672
54673 #include <asm/io.h>
54674+#include <asm/pgtable.h>
54675
54676 #ifdef __sparc__
54677 #include <asm/fbio.h>
54678@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54679 info->sprite.buf_align = 16; /* and 64 lines tall. */
54680 info->sprite.flags = FB_PIXMAP_IO;
54681
54682- info->fbops->fb_cursor = atyfb_cursor;
54683+ pax_open_kernel();
54684+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54685+ pax_close_kernel();
54686
54687 return 0;
54688 }
54689diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54690index d6cab1f..112f680 100644
54691--- a/drivers/video/fbdev/core/fb_defio.c
54692+++ b/drivers/video/fbdev/core/fb_defio.c
54693@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54694
54695 BUG_ON(!fbdefio);
54696 mutex_init(&fbdefio->lock);
54697- info->fbops->fb_mmap = fb_deferred_io_mmap;
54698+ pax_open_kernel();
54699+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54700+ pax_close_kernel();
54701 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54702 INIT_LIST_HEAD(&fbdefio->pagelist);
54703 if (fbdefio->delay == 0) /* set a default of 1 s */
54704@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54705 page->mapping = NULL;
54706 }
54707
54708- info->fbops->fb_mmap = NULL;
54709+ *(void **)&info->fbops->fb_mmap = NULL;
54710 mutex_destroy(&fbdefio->lock);
54711 }
54712 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54713diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54714index 0705d88..d9429bf 100644
54715--- a/drivers/video/fbdev/core/fbmem.c
54716+++ b/drivers/video/fbdev/core/fbmem.c
54717@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54718 __u32 data;
54719 int err;
54720
54721- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54722+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54723
54724 data = (__u32) (unsigned long) fix->smem_start;
54725 err |= put_user(data, &fix32->smem_start);
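
The fbmem.c change looks cosmetic but is type-driven: fix32->id is a char array, so it decays to char *, whereas &fix32->id has type char (*)[16]; both denote the same address, but the stricter __user pointer checking only accepts the decayed form for copy_to_user(). Illustrated:

#include <stdio.h>

struct fix32_like { char id[16]; };

int main(void)
{
	struct fix32_like f;
	char *p       = f.id;	/* decayed element pointer    */
	char (*q)[16] = &f.id;	/* pointer to the whole array */

	/* identical addresses, distinct types */
	printf("%p %p\n", (void *)p, (void *)q);
	return 0;
}
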
54726diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54727index 4254336..282567e 100644
54728--- a/drivers/video/fbdev/hyperv_fb.c
54729+++ b/drivers/video/fbdev/hyperv_fb.c
54730@@ -240,7 +240,7 @@ static uint screen_fb_size;
54731 static inline int synthvid_send(struct hv_device *hdev,
54732 struct synthvid_msg *msg)
54733 {
54734- static atomic64_t request_id = ATOMIC64_INIT(0);
54735+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54736 int ret;
54737
54738 msg->pipe_hdr.type = PIPE_MSG_DATA;
54739@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54740
54741 ret = vmbus_sendpacket(hdev->channel, msg,
54742 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54743- atomic64_inc_return(&request_id),
54744+ atomic64_inc_return_unchecked(&request_id),
54745 VM_PKT_DATA_INBAND, 0);
54746
54747 if (ret)
54748diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54749index 7672d2e..b56437f 100644
54750--- a/drivers/video/fbdev/i810/i810_accel.c
54751+++ b/drivers/video/fbdev/i810/i810_accel.c
54752@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54753 }
54754 }
54755 printk("ringbuffer lockup!!!\n");
54756+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54757 i810_report_error(mmio);
54758 par->dev_flags |= LOCKUP;
54759 info->pixmap.scan_align = 1;
54760diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54761index a01147f..5d896f8 100644
54762--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54763+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54764@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54765
54766 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54767 struct matrox_switch matrox_mystique = {
54768- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54769+ .preinit = MGA1064_preinit,
54770+ .reset = MGA1064_reset,
54771+ .init = MGA1064_init,
54772+ .restore = MGA1064_restore,
54773 };
54774 EXPORT_SYMBOL(matrox_mystique);
54775 #endif
54776
54777 #ifdef CONFIG_FB_MATROX_G
54778 struct matrox_switch matrox_G100 = {
54779- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54780+ .preinit = MGAG100_preinit,
54781+ .reset = MGAG100_reset,
54782+ .init = MGAG100_init,
54783+ .restore = MGAG100_restore,
54784 };
54785 EXPORT_SYMBOL(matrox_G100);
54786 #endif
54787diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54788index 195ad7c..09743fc 100644
54789--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54790+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54791@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54792 }
54793
54794 struct matrox_switch matrox_millennium = {
54795- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54796+ .preinit = Ti3026_preinit,
54797+ .reset = Ti3026_reset,
54798+ .init = Ti3026_init,
54799+ .restore = Ti3026_restore
54800 };
54801 EXPORT_SYMBOL(matrox_millennium);
54802 #endif
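
The matrox_switch conversions here (and sh_mobile_lcdc_sys_bus_ops further down) replace positional initializers with C99 designated ones: once ops structures are constified and may gain or reorder fields, binding each function to its slot by name is the only robust spelling, and unnamed members are still zeroed. Sketch:

struct matrox_switch_like {
	int  (*preinit)(void *minfo);
	void (*reset)(void *minfo);
	int  (*init)(void *minfo);
	void (*restore)(void *minfo);
};

static int  my_preinit(void *m) { (void)m; return 0; }
static void my_reset(void *m)   { (void)m; }
static int  my_init(void *m)    { (void)m; return 0; }
static void my_restore(void *m) { (void)m; }

/* survives field reordering; any unnamed field is zero-initialized */
static const struct matrox_switch_like matrox_like = {
	.preinit = my_preinit,
	.reset   = my_reset,
	.init    = my_init,
	.restore = my_restore,
};
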
54803diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54804index fe92eed..106e085 100644
54805--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54806+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54807@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54808 struct mb862xxfb_par *par = info->par;
54809
54810 if (info->var.bits_per_pixel == 32) {
54811- info->fbops->fb_fillrect = cfb_fillrect;
54812- info->fbops->fb_copyarea = cfb_copyarea;
54813- info->fbops->fb_imageblit = cfb_imageblit;
54814+ pax_open_kernel();
54815+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54816+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54817+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54818+ pax_close_kernel();
54819 } else {
54820 outreg(disp, GC_L0EM, 3);
54821- info->fbops->fb_fillrect = mb86290fb_fillrect;
54822- info->fbops->fb_copyarea = mb86290fb_copyarea;
54823- info->fbops->fb_imageblit = mb86290fb_imageblit;
54824+ pax_open_kernel();
54825+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54826+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54827+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54828+ pax_close_kernel();
54829 }
54830 outreg(draw, GDC_REG_DRAW_BASE, 0);
54831 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54832diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54833index def0412..fed6529 100644
54834--- a/drivers/video/fbdev/nvidia/nvidia.c
54835+++ b/drivers/video/fbdev/nvidia/nvidia.c
54836@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54837 info->fix.line_length = (info->var.xres_virtual *
54838 info->var.bits_per_pixel) >> 3;
54839 if (info->var.accel_flags) {
54840- info->fbops->fb_imageblit = nvidiafb_imageblit;
54841- info->fbops->fb_fillrect = nvidiafb_fillrect;
54842- info->fbops->fb_copyarea = nvidiafb_copyarea;
54843- info->fbops->fb_sync = nvidiafb_sync;
54844+ pax_open_kernel();
54845+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54846+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54847+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54848+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54849+ pax_close_kernel();
54850 info->pixmap.scan_align = 4;
54851 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54852 info->flags |= FBINFO_READS_FAST;
54853 NVResetGraphics(info);
54854 } else {
54855- info->fbops->fb_imageblit = cfb_imageblit;
54856- info->fbops->fb_fillrect = cfb_fillrect;
54857- info->fbops->fb_copyarea = cfb_copyarea;
54858- info->fbops->fb_sync = NULL;
54859+ pax_open_kernel();
54860+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54861+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54862+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54863+ *(void **)&info->fbops->fb_sync = NULL;
54864+ pax_close_kernel();
54865 info->pixmap.scan_align = 1;
54866 info->flags |= FBINFO_HWACCEL_DISABLED;
54867 info->flags &= ~FBINFO_READS_FAST;
54868@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54869 info->pixmap.size = 8 * 1024;
54870 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54871
54872- if (!hwcur)
54873- info->fbops->fb_cursor = NULL;
54874+ if (!hwcur) {
54875+ pax_open_kernel();
54876+ *(void **)&info->fbops->fb_cursor = NULL;
54877+ pax_close_kernel();
54878+ }
54879
54880 info->var.accel_flags = (!noaccel);
54881
54882diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54883index 2412a0d..294215b 100644
54884--- a/drivers/video/fbdev/omap2/dss/display.c
54885+++ b/drivers/video/fbdev/omap2/dss/display.c
54886@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54887 if (dssdev->name == NULL)
54888 dssdev->name = dssdev->alias;
54889
54890+ pax_open_kernel();
54891 if (drv && drv->get_resolution == NULL)
54892- drv->get_resolution = omapdss_default_get_resolution;
54893+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54894 if (drv && drv->get_recommended_bpp == NULL)
54895- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54896+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54897 if (drv && drv->get_timings == NULL)
54898- drv->get_timings = omapdss_default_get_timings;
54899+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54900+ pax_close_kernel();
54901
54902 mutex_lock(&panel_list_mutex);
54903 list_add_tail(&dssdev->panel_list, &panel_list);
54904diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54905index 83433cb..71e9b98 100644
54906--- a/drivers/video/fbdev/s1d13xxxfb.c
54907+++ b/drivers/video/fbdev/s1d13xxxfb.c
54908@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54909
54910 switch(prod_id) {
54911 case S1D13506_PROD_ID: /* activate acceleration */
54912- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54913- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54914+ pax_open_kernel();
54915+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54916+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54917+ pax_close_kernel();
54918 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54919 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54920 break;
54921diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54922index d3013cd..95b8285 100644
54923--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
54924+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54925@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
54926 }
54927
54928 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
54929- lcdc_sys_write_index,
54930- lcdc_sys_write_data,
54931- lcdc_sys_read_data,
54932+ .write_index = lcdc_sys_write_index,
54933+ .write_data = lcdc_sys_write_data,
54934+ .read_data = lcdc_sys_read_data,
54935 };
54936
54937 static int sh_mobile_lcdc_sginit(struct fb_info *info,
54938diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
54939index 9279e5f..d5f5276 100644
54940--- a/drivers/video/fbdev/smscufx.c
54941+++ b/drivers/video/fbdev/smscufx.c
54942@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54943 fb_deferred_io_cleanup(info);
54944 kfree(info->fbdefio);
54945 info->fbdefio = NULL;
54946- info->fbops->fb_mmap = ufx_ops_mmap;
54947+ pax_open_kernel();
54948+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54949+ pax_close_kernel();
54950 }
54951
54952 pr_debug("released /dev/fb%d user=%d count=%d",
54953diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
54954index ff2b873..626a8d5 100644
54955--- a/drivers/video/fbdev/udlfb.c
54956+++ b/drivers/video/fbdev/udlfb.c
54957@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54958 dlfb_urb_completion(urb);
54959
54960 error:
54961- atomic_add(bytes_sent, &dev->bytes_sent);
54962- atomic_add(bytes_identical, &dev->bytes_identical);
54963- atomic_add(width*height*2, &dev->bytes_rendered);
54964+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54965+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54966+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54967 end_cycles = get_cycles();
54968- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54969+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54970 >> 10)), /* Kcycles */
54971 &dev->cpu_kcycles_used);
54972
54973@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54974 dlfb_urb_completion(urb);
54975
54976 error:
54977- atomic_add(bytes_sent, &dev->bytes_sent);
54978- atomic_add(bytes_identical, &dev->bytes_identical);
54979- atomic_add(bytes_rendered, &dev->bytes_rendered);
54980+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54981+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54982+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54983 end_cycles = get_cycles();
54984- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54985+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54986 >> 10)), /* Kcycles */
54987 &dev->cpu_kcycles_used);
54988 }
54989@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54990 fb_deferred_io_cleanup(info);
54991 kfree(info->fbdefio);
54992 info->fbdefio = NULL;
54993- info->fbops->fb_mmap = dlfb_ops_mmap;
54994+ pax_open_kernel();
54995+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54996+ pax_close_kernel();
54997 }
54998
54999 pr_warn("released /dev/fb%d user=%d count=%d\n",
55000@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55001 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55002 struct dlfb_data *dev = fb_info->par;
55003 return snprintf(buf, PAGE_SIZE, "%u\n",
55004- atomic_read(&dev->bytes_rendered));
55005+ atomic_read_unchecked(&dev->bytes_rendered));
55006 }
55007
55008 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55009@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55010 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55011 struct dlfb_data *dev = fb_info->par;
55012 return snprintf(buf, PAGE_SIZE, "%u\n",
55013- atomic_read(&dev->bytes_identical));
55014+ atomic_read_unchecked(&dev->bytes_identical));
55015 }
55016
55017 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55018@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55019 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55020 struct dlfb_data *dev = fb_info->par;
55021 return snprintf(buf, PAGE_SIZE, "%u\n",
55022- atomic_read(&dev->bytes_sent));
55023+ atomic_read_unchecked(&dev->bytes_sent));
55024 }
55025
55026 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55027@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55028 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55029 struct dlfb_data *dev = fb_info->par;
55030 return snprintf(buf, PAGE_SIZE, "%u\n",
55031- atomic_read(&dev->cpu_kcycles_used));
55032+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55033 }
55034
55035 static ssize_t edid_show(
55036@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55037 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55038 struct dlfb_data *dev = fb_info->par;
55039
55040- atomic_set(&dev->bytes_rendered, 0);
55041- atomic_set(&dev->bytes_identical, 0);
55042- atomic_set(&dev->bytes_sent, 0);
55043- atomic_set(&dev->cpu_kcycles_used, 0);
55044+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55045+ atomic_set_unchecked(&dev->bytes_identical, 0);
55046+ atomic_set_unchecked(&dev->bytes_sent, 0);
55047+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55048
55049 return count;
55050 }
55051diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55052index d32d1c4..46722e6 100644
55053--- a/drivers/video/fbdev/uvesafb.c
55054+++ b/drivers/video/fbdev/uvesafb.c
55055@@ -19,6 +19,7 @@
55056 #include <linux/io.h>
55057 #include <linux/mutex.h>
55058 #include <linux/slab.h>
55059+#include <linux/moduleloader.h>
55060 #include <video/edid.h>
55061 #include <video/uvesafb.h>
55062 #ifdef CONFIG_X86
55063@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55064 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55065 par->pmi_setpal = par->ypan = 0;
55066 } else {
55067+
55068+#ifdef CONFIG_PAX_KERNEXEC
55069+#ifdef CONFIG_MODULES
55070+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55071+#endif
55072+ if (!par->pmi_code) {
55073+ par->pmi_setpal = par->ypan = 0;
55074+ return 0;
55075+ }
55076+#endif
55077+
55078 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55079 + task->t.regs.edi);
55080+
55081+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55082+ pax_open_kernel();
55083+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55084+ pax_close_kernel();
55085+
55086+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55087+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55088+#else
55089 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55090 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55091+#endif
55092+
55093 printk(KERN_INFO "uvesafb: protected mode interface info at "
55094 "%04x:%04x\n",
55095 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55096@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55097 par->ypan = ypan;
55098
55099 if (par->pmi_setpal || par->ypan) {
55100+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55101 if (__supported_pte_mask & _PAGE_NX) {
55102 par->pmi_setpal = par->ypan = 0;
55103 printk(KERN_WARNING "uvesafb: NX protection is active, "
55104 "better not use the PMI.\n");
55105- } else {
55106+ } else
55107+#endif
55108 uvesafb_vbe_getpmi(task, par);
55109- }
55110 }
55111 #else
55112 /* The protected mode interface is not available on non-x86. */
55113@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55114 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55115
55116 /* Disable blanking if the user requested so. */
55117- if (!blank)
55118- info->fbops->fb_blank = NULL;
55119+ if (!blank) {
55120+ pax_open_kernel();
55121+ *(void **)&info->fbops->fb_blank = NULL;
55122+ pax_close_kernel();
55123+ }
55124
55125 /*
55126 * Find out how much IO memory is required for the mode with
55127@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55128 info->flags = FBINFO_FLAG_DEFAULT |
55129 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55130
55131- if (!par->ypan)
55132- info->fbops->fb_pan_display = NULL;
55133+ if (!par->ypan) {
55134+ pax_open_kernel();
55135+ *(void **)&info->fbops->fb_pan_display = NULL;
55136+ pax_close_kernel();
55137+ }
55138 }
55139
55140 static void uvesafb_init_mtrr(struct fb_info *info)
55141@@ -1786,6 +1816,11 @@ out_mode:
55142 out:
55143 kfree(par->vbe_modes);
55144
55145+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55146+ if (par->pmi_code)
55147+ module_memfree_exec(par->pmi_code);
55148+#endif
55149+
55150 framebuffer_release(info);
55151 return err;
55152 }
55153@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55154 kfree(par->vbe_state_orig);
55155 kfree(par->vbe_state_saved);
55156
55157+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55158+ if (par->pmi_code)
55159+ module_memfree_exec(par->pmi_code);
55160+#endif
55161+
55162 framebuffer_release(info);
55163 }
55164 return 0;
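
The uvesafb changes above handle executing the BIOS-provided protected-mode interface (PMI) code under KERNEXEC: the code bytes arrive in ordinary data pages, which are no longer executable, so they are copied into an executable module allocation (module_alloc_exec, released with module_memfree_exec) inside a pax_open_kernel() window, and the entry points are recomputed relative to the copy. A userspace analog of the write-then-seal allocation:

#include <string.h>
#include <sys/mman.h>

/* Copy code bytes into a region that is writable during setup and
 * executable afterwards, i.e. the W^X discipline the patch enforces. */
static void *copy_to_exec(const void *code, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return NULL;
	memcpy(p, code, len);
	if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0) {
		munmap(p, len);
		return NULL;
	}
	return p;			/* callable, no longer writable */
}
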
55165diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55166index d79a0ac..2d0c3d4 100644
55167--- a/drivers/video/fbdev/vesafb.c
55168+++ b/drivers/video/fbdev/vesafb.c
55169@@ -9,6 +9,7 @@
55170 */
55171
55172 #include <linux/module.h>
55173+#include <linux/moduleloader.h>
55174 #include <linux/kernel.h>
55175 #include <linux/errno.h>
55176 #include <linux/string.h>
55177@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55178 static int vram_total; /* Set total amount of memory */
55179 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55180 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55181-static void (*pmi_start)(void) __read_mostly;
55182-static void (*pmi_pal) (void) __read_mostly;
55183+static void (*pmi_start)(void) __read_only;
55184+static void (*pmi_pal) (void) __read_only;
55185 static int depth __read_mostly;
55186 static int vga_compat __read_mostly;
55187 /* --------------------------------------------------------------------- */
55188@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55189 unsigned int size_remap;
55190 unsigned int size_total;
55191 char *option = NULL;
55192+ void *pmi_code = NULL;
55193
55194 /* ignore error return of fb_get_options */
55195 fb_get_options("vesafb", &option);
55196@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55197 size_remap = size_total;
55198 vesafb_fix.smem_len = size_remap;
55199
55200-#ifndef __i386__
55201- screen_info.vesapm_seg = 0;
55202-#endif
55203-
55204 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55205 printk(KERN_WARNING
55206 "vesafb: cannot reserve video memory at 0x%lx\n",
55207@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55208 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55209 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55210
55211+#ifdef __i386__
55212+
55213+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55214+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55215+ if (!pmi_code)
55216+#elif !defined(CONFIG_PAX_KERNEXEC)
55217+ if (0)
55218+#endif
55219+
55220+#endif
55221+ screen_info.vesapm_seg = 0;
55222+
55223 if (screen_info.vesapm_seg) {
55224- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55225- screen_info.vesapm_seg,screen_info.vesapm_off);
55226+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55227+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55228 }
55229
55230 if (screen_info.vesapm_seg < 0xc000)
55231@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55232
55233 if (ypan || pmi_setpal) {
55234 unsigned short *pmi_base;
55235+
55236 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55237- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55238- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55239+
55240+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55241+ pax_open_kernel();
55242+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55243+#else
55244+ pmi_code = pmi_base;
55245+#endif
55246+
55247+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55248+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55249+
55250+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55251+ pmi_start = ktva_ktla(pmi_start);
55252+ pmi_pal = ktva_ktla(pmi_pal);
55253+ pax_close_kernel();
55254+#endif
55255+
55256 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55257 if (pmi_base[3]) {
55258 printk(KERN_INFO "vesafb: pmi: ports = ");
55259@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55260 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55261 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55262
55263- if (!ypan)
55264- info->fbops->fb_pan_display = NULL;
55265+ if (!ypan) {
55266+ pax_open_kernel();
55267+ *(void **)&info->fbops->fb_pan_display = NULL;
55268+ pax_close_kernel();
55269+ }
55270
55271 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55272 err = -ENOMEM;
55273@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55274 fb_info(info, "%s frame buffer device\n", info->fix.id);
55275 return 0;
55276 err:
55277+
55278+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55279+ module_memfree_exec(pmi_code);
55280+#endif
55281+
55282 if (info->screen_base)
55283 iounmap(info->screen_base);
55284 framebuffer_release(info);
55285diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55286index 88714ae..16c2e11 100644
55287--- a/drivers/video/fbdev/via/via_clock.h
55288+++ b/drivers/video/fbdev/via/via_clock.h
55289@@ -56,7 +56,7 @@ struct via_clock {
55290
55291 void (*set_engine_pll_state)(u8 state);
55292 void (*set_engine_pll)(struct via_pll_config config);
55293-};
55294+} __no_const;
55295
55296
55297 static inline u32 get_pll_internal_frequency(u32 ref_freq,
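
via_clock's __no_const is the escape hatch of the constify plugin and the counterpart of the __do_const seen on us_unusual_dev earlier: structures consisting of function pointers are forced read-only by default, so a struct whose pointers really are assigned at runtime must opt out. Assumed wiring; both attributes are plugin-recognized and must expand to nothing on plain builds:

#ifdef CONSTIFY_PLUGIN
# define __do_const __attribute__((do_const))
# define __no_const __attribute__((no_const))
#else
# define __do_const
# define __no_const
#endif

/* opts out of forced constification: set_pll is assigned at probe time */
struct clock_ops {
	void (*set_pll)(unsigned int mhz);
} __no_const;
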
55298diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55299index 3c14e43..2630570 100644
55300--- a/drivers/video/logo/logo_linux_clut224.ppm
55301+++ b/drivers/video/logo/logo_linux_clut224.ppm
55302@@ -2,1603 +2,1123 @@ P3
55303 # Standard 224-color Linux logo
55304 80 80
55305 255
55306- 0 0 0 0 0 0 0 0 0 0 0 0
55307- 0 0 0 0 0 0 0 0 0 0 0 0
55308- 0 0 0 0 0 0 0 0 0 0 0 0
55309- 0 0 0 0 0 0 0 0 0 0 0 0
55310- 0 0 0 0 0 0 0 0 0 0 0 0
55311- 0 0 0 0 0 0 0 0 0 0 0 0
55312- 0 0 0 0 0 0 0 0 0 0 0 0
55313- 0 0 0 0 0 0 0 0 0 0 0 0
55314- 0 0 0 0 0 0 0 0 0 0 0 0
55315- 6 6 6 6 6 6 10 10 10 10 10 10
55316- 10 10 10 6 6 6 6 6 6 6 6 6
55317- 0 0 0 0 0 0 0 0 0 0 0 0
55318- 0 0 0 0 0 0 0 0 0 0 0 0
55319- 0 0 0 0 0 0 0 0 0 0 0 0
55320- 0 0 0 0 0 0 0 0 0 0 0 0
55321- 0 0 0 0 0 0 0 0 0 0 0 0
55322- 0 0 0 0 0 0 0 0 0 0 0 0
55323- 0 0 0 0 0 0 0 0 0 0 0 0
55324- 0 0 0 0 0 0 0 0 0 0 0 0
55325- 0 0 0 0 0 0 0 0 0 0 0 0
55326- 0 0 0 0 0 0 0 0 0 0 0 0
55327- 0 0 0 0 0 0 0 0 0 0 0 0
55328- 0 0 0 0 0 0 0 0 0 0 0 0
55329- 0 0 0 0 0 0 0 0 0 0 0 0
55330- 0 0 0 0 0 0 0 0 0 0 0 0
55331- 0 0 0 0 0 0 0 0 0 0 0 0
55332- 0 0 0 0 0 0 0 0 0 0 0 0
55333- 0 0 0 0 0 0 0 0 0 0 0 0
55334- 0 0 0 6 6 6 10 10 10 14 14 14
55335- 22 22 22 26 26 26 30 30 30 34 34 34
55336- 30 30 30 30 30 30 26 26 26 18 18 18
55337- 14 14 14 10 10 10 6 6 6 0 0 0
55338- 0 0 0 0 0 0 0 0 0 0 0 0
55339- 0 0 0 0 0 0 0 0 0 0 0 0
55340- 0 0 0 0 0 0 0 0 0 0 0 0
55341- 0 0 0 0 0 0 0 0 0 0 0 0
55342- 0 0 0 0 0 0 0 0 0 0 0 0
55343- 0 0 0 0 0 0 0 0 0 0 0 0
55344- 0 0 0 0 0 0 0 0 0 0 0 0
55345- 0 0 0 0 0 0 0 0 0 0 0 0
55346- 0 0 0 0 0 0 0 0 0 0 0 0
55347- 0 0 0 0 0 1 0 0 1 0 0 0
55348- 0 0 0 0 0 0 0 0 0 0 0 0
55349- 0 0 0 0 0 0 0 0 0 0 0 0
55350- 0 0 0 0 0 0 0 0 0 0 0 0
55351- 0 0 0 0 0 0 0 0 0 0 0 0
55352- 0 0 0 0 0 0 0 0 0 0 0 0
55353- 0 0 0 0 0 0 0 0 0 0 0 0
55354- 6 6 6 14 14 14 26 26 26 42 42 42
55355- 54 54 54 66 66 66 78 78 78 78 78 78
55356- 78 78 78 74 74 74 66 66 66 54 54 54
55357- 42 42 42 26 26 26 18 18 18 10 10 10
55358- 6 6 6 0 0 0 0 0 0 0 0 0
55359- 0 0 0 0 0 0 0 0 0 0 0 0
55360- 0 0 0 0 0 0 0 0 0 0 0 0
55361- 0 0 0 0 0 0 0 0 0 0 0 0
55362- 0 0 0 0 0 0 0 0 0 0 0 0
55363- 0 0 0 0 0 0 0 0 0 0 0 0
55364- 0 0 0 0 0 0 0 0 0 0 0 0
55365- 0 0 0 0 0 0 0 0 0 0 0 0
55366- 0 0 0 0 0 0 0 0 0 0 0 0
55367- 0 0 1 0 0 0 0 0 0 0 0 0
55368- 0 0 0 0 0 0 0 0 0 0 0 0
55369- 0 0 0 0 0 0 0 0 0 0 0 0
55370- 0 0 0 0 0 0 0 0 0 0 0 0
55371- 0 0 0 0 0 0 0 0 0 0 0 0
55372- 0 0 0 0 0 0 0 0 0 0 0 0
55373- 0 0 0 0 0 0 0 0 0 10 10 10
55374- 22 22 22 42 42 42 66 66 66 86 86 86
55375- 66 66 66 38 38 38 38 38 38 22 22 22
55376- 26 26 26 34 34 34 54 54 54 66 66 66
55377- 86 86 86 70 70 70 46 46 46 26 26 26
55378- 14 14 14 6 6 6 0 0 0 0 0 0
55379- 0 0 0 0 0 0 0 0 0 0 0 0
55380- 0 0 0 0 0 0 0 0 0 0 0 0
55381- 0 0 0 0 0 0 0 0 0 0 0 0
55382- 0 0 0 0 0 0 0 0 0 0 0 0
55383- 0 0 0 0 0 0 0 0 0 0 0 0
55384- 0 0 0 0 0 0 0 0 0 0 0 0
55385- 0 0 0 0 0 0 0 0 0 0 0 0
55386- 0 0 0 0 0 0 0 0 0 0 0 0
55387- 0 0 1 0 0 1 0 0 1 0 0 0
55388- 0 0 0 0 0 0 0 0 0 0 0 0
55389- 0 0 0 0 0 0 0 0 0 0 0 0
55390- 0 0 0 0 0 0 0 0 0 0 0 0
55391- 0 0 0 0 0 0 0 0 0 0 0 0
55392- 0 0 0 0 0 0 0 0 0 0 0 0
55393- 0 0 0 0 0 0 10 10 10 26 26 26
55394- 50 50 50 82 82 82 58 58 58 6 6 6
55395- 2 2 6 2 2 6 2 2 6 2 2 6
55396- 2 2 6 2 2 6 2 2 6 2 2 6
55397- 6 6 6 54 54 54 86 86 86 66 66 66
55398- 38 38 38 18 18 18 6 6 6 0 0 0
55399- 0 0 0 0 0 0 0 0 0 0 0 0
55400- 0 0 0 0 0 0 0 0 0 0 0 0
55401- 0 0 0 0 0 0 0 0 0 0 0 0
55402- 0 0 0 0 0 0 0 0 0 0 0 0
55403- 0 0 0 0 0 0 0 0 0 0 0 0
55404- 0 0 0 0 0 0 0 0 0 0 0 0
55405- 0 0 0 0 0 0 0 0 0 0 0 0
55406- 0 0 0 0 0 0 0 0 0 0 0 0
55407- 0 0 0 0 0 0 0 0 0 0 0 0
55408- 0 0 0 0 0 0 0 0 0 0 0 0
55409- 0 0 0 0 0 0 0 0 0 0 0 0
55410- 0 0 0 0 0 0 0 0 0 0 0 0
55411- 0 0 0 0 0 0 0 0 0 0 0 0
55412- 0 0 0 0 0 0 0 0 0 0 0 0
55413- 0 0 0 6 6 6 22 22 22 50 50 50
55414- 78 78 78 34 34 34 2 2 6 2 2 6
55415- 2 2 6 2 2 6 2 2 6 2 2 6
55416- 2 2 6 2 2 6 2 2 6 2 2 6
55417- 2 2 6 2 2 6 6 6 6 70 70 70
55418- 78 78 78 46 46 46 22 22 22 6 6 6
55419- 0 0 0 0 0 0 0 0 0 0 0 0
55420- 0 0 0 0 0 0 0 0 0 0 0 0
55421- 0 0 0 0 0 0 0 0 0 0 0 0
55422- 0 0 0 0 0 0 0 0 0 0 0 0
55423- 0 0 0 0 0 0 0 0 0 0 0 0
55424- 0 0 0 0 0 0 0 0 0 0 0 0
55425- 0 0 0 0 0 0 0 0 0 0 0 0
55426- 0 0 0 0 0 0 0 0 0 0 0 0
55427- 0 0 1 0 0 1 0 0 1 0 0 0
55428- 0 0 0 0 0 0 0 0 0 0 0 0
55429- 0 0 0 0 0 0 0 0 0 0 0 0
55430- 0 0 0 0 0 0 0 0 0 0 0 0
55431- 0 0 0 0 0 0 0 0 0 0 0 0
55432- 0 0 0 0 0 0 0 0 0 0 0 0
55433- 6 6 6 18 18 18 42 42 42 82 82 82
55434- 26 26 26 2 2 6 2 2 6 2 2 6
55435- 2 2 6 2 2 6 2 2 6 2 2 6
55436- 2 2 6 2 2 6 2 2 6 14 14 14
55437- 46 46 46 34 34 34 6 6 6 2 2 6
55438- 42 42 42 78 78 78 42 42 42 18 18 18
55439- 6 6 6 0 0 0 0 0 0 0 0 0
55440- 0 0 0 0 0 0 0 0 0 0 0 0
55441- 0 0 0 0 0 0 0 0 0 0 0 0
55442- 0 0 0 0 0 0 0 0 0 0 0 0
55443- 0 0 0 0 0 0 0 0 0 0 0 0
55444- 0 0 0 0 0 0 0 0 0 0 0 0
55445- 0 0 0 0 0 0 0 0 0 0 0 0
55446- 0 0 0 0 0 0 0 0 0 0 0 0
55447- 0 0 1 0 0 0 0 0 1 0 0 0
55448- 0 0 0 0 0 0 0 0 0 0 0 0
55449- 0 0 0 0 0 0 0 0 0 0 0 0
55450- 0 0 0 0 0 0 0 0 0 0 0 0
55451- 0 0 0 0 0 0 0 0 0 0 0 0
55452- 0 0 0 0 0 0 0 0 0 0 0 0
55453- 10 10 10 30 30 30 66 66 66 58 58 58
55454- 2 2 6 2 2 6 2 2 6 2 2 6
55455- 2 2 6 2 2 6 2 2 6 2 2 6
55456- 2 2 6 2 2 6 2 2 6 26 26 26
55457- 86 86 86 101 101 101 46 46 46 10 10 10
55458- 2 2 6 58 58 58 70 70 70 34 34 34
55459- 10 10 10 0 0 0 0 0 0 0 0 0
55460- 0 0 0 0 0 0 0 0 0 0 0 0
55461- 0 0 0 0 0 0 0 0 0 0 0 0
55462- 0 0 0 0 0 0 0 0 0 0 0 0
55463- 0 0 0 0 0 0 0 0 0 0 0 0
55464- 0 0 0 0 0 0 0 0 0 0 0 0
55465- 0 0 0 0 0 0 0 0 0 0 0 0
55466- 0 0 0 0 0 0 0 0 0 0 0 0
55467- 0 0 1 0 0 1 0 0 1 0 0 0
55468- 0 0 0 0 0 0 0 0 0 0 0 0
55469- 0 0 0 0 0 0 0 0 0 0 0 0
55470- 0 0 0 0 0 0 0 0 0 0 0 0
55471- 0 0 0 0 0 0 0 0 0 0 0 0
55472- 0 0 0 0 0 0 0 0 0 0 0 0
55473- 14 14 14 42 42 42 86 86 86 10 10 10
55474- 2 2 6 2 2 6 2 2 6 2 2 6
55475- 2 2 6 2 2 6 2 2 6 2 2 6
55476- 2 2 6 2 2 6 2 2 6 30 30 30
55477- 94 94 94 94 94 94 58 58 58 26 26 26
55478- 2 2 6 6 6 6 78 78 78 54 54 54
55479- 22 22 22 6 6 6 0 0 0 0 0 0
55480- 0 0 0 0 0 0 0 0 0 0 0 0
55481- 0 0 0 0 0 0 0 0 0 0 0 0
55482- 0 0 0 0 0 0 0 0 0 0 0 0
55483- 0 0 0 0 0 0 0 0 0 0 0 0
55484- 0 0 0 0 0 0 0 0 0 0 0 0
55485- 0 0 0 0 0 0 0 0 0 0 0 0
55486- 0 0 0 0 0 0 0 0 0 0 0 0
55487- 0 0 0 0 0 0 0 0 0 0 0 0
55488- 0 0 0 0 0 0 0 0 0 0 0 0
55489- 0 0 0 0 0 0 0 0 0 0 0 0
55490- 0 0 0 0 0 0 0 0 0 0 0 0
55491- 0 0 0 0 0 0 0 0 0 0 0 0
55492- 0 0 0 0 0 0 0 0 0 6 6 6
55493- 22 22 22 62 62 62 62 62 62 2 2 6
55494- 2 2 6 2 2 6 2 2 6 2 2 6
55495- 2 2 6 2 2 6 2 2 6 2 2 6
55496- 2 2 6 2 2 6 2 2 6 26 26 26
55497- 54 54 54 38 38 38 18 18 18 10 10 10
55498- 2 2 6 2 2 6 34 34 34 82 82 82
55499- 38 38 38 14 14 14 0 0 0 0 0 0
55500- 0 0 0 0 0 0 0 0 0 0 0 0
55501- 0 0 0 0 0 0 0 0 0 0 0 0
55502- 0 0 0 0 0 0 0 0 0 0 0 0
55503- 0 0 0 0 0 0 0 0 0 0 0 0
55504- 0 0 0 0 0 0 0 0 0 0 0 0
55505- 0 0 0 0 0 0 0 0 0 0 0 0
55506- 0 0 0 0 0 0 0 0 0 0 0 0
55507- 0 0 0 0 0 1 0 0 1 0 0 0
55508- 0 0 0 0 0 0 0 0 0 0 0 0
55509- 0 0 0 0 0 0 0 0 0 0 0 0
55510- 0 0 0 0 0 0 0 0 0 0 0 0
55511- 0 0 0 0 0 0 0 0 0 0 0 0
55512- 0 0 0 0 0 0 0 0 0 6 6 6
55513- 30 30 30 78 78 78 30 30 30 2 2 6
55514- 2 2 6 2 2 6 2 2 6 2 2 6
55515- 2 2 6 2 2 6 2 2 6 2 2 6
55516- 2 2 6 2 2 6 2 2 6 10 10 10
55517- 10 10 10 2 2 6 2 2 6 2 2 6
55518- 2 2 6 2 2 6 2 2 6 78 78 78
55519- 50 50 50 18 18 18 6 6 6 0 0 0
55520- 0 0 0 0 0 0 0 0 0 0 0 0
55521- 0 0 0 0 0 0 0 0 0 0 0 0
55522- 0 0 0 0 0 0 0 0 0 0 0 0
55523- 0 0 0 0 0 0 0 0 0 0 0 0
55524- 0 0 0 0 0 0 0 0 0 0 0 0
55525- 0 0 0 0 0 0 0 0 0 0 0 0
55526- 0 0 0 0 0 0 0 0 0 0 0 0
55527- 0 0 1 0 0 0 0 0 0 0 0 0
55528- 0 0 0 0 0 0 0 0 0 0 0 0
55529- 0 0 0 0 0 0 0 0 0 0 0 0
55530- 0 0 0 0 0 0 0 0 0 0 0 0
55531- 0 0 0 0 0 0 0 0 0 0 0 0
55532- 0 0 0 0 0 0 0 0 0 10 10 10
55533- 38 38 38 86 86 86 14 14 14 2 2 6
55534- 2 2 6 2 2 6 2 2 6 2 2 6
55535- 2 2 6 2 2 6 2 2 6 2 2 6
55536- 2 2 6 2 2 6 2 2 6 2 2 6
55537- 2 2 6 2 2 6 2 2 6 2 2 6
55538- 2 2 6 2 2 6 2 2 6 54 54 54
55539- 66 66 66 26 26 26 6 6 6 0 0 0
55540- 0 0 0 0 0 0 0 0 0 0 0 0
55541- 0 0 0 0 0 0 0 0 0 0 0 0
55542- 0 0 0 0 0 0 0 0 0 0 0 0
55543- 0 0 0 0 0 0 0 0 0 0 0 0
55544- 0 0 0 0 0 0 0 0 0 0 0 0
55545- 0 0 0 0 0 0 0 0 0 0 0 0
55546- 0 0 0 0 0 0 0 0 0 0 0 0
55547- 0 0 0 0 0 1 0 0 1 0 0 0
55548- 0 0 0 0 0 0 0 0 0 0 0 0
55549- 0 0 0 0 0 0 0 0 0 0 0 0
55550- 0 0 0 0 0 0 0 0 0 0 0 0
55551- 0 0 0 0 0 0 0 0 0 0 0 0
55552- 0 0 0 0 0 0 0 0 0 14 14 14
55553- 42 42 42 82 82 82 2 2 6 2 2 6
55554- 2 2 6 6 6 6 10 10 10 2 2 6
55555- 2 2 6 2 2 6 2 2 6 2 2 6
55556- 2 2 6 2 2 6 2 2 6 6 6 6
55557- 14 14 14 10 10 10 2 2 6 2 2 6
55558- 2 2 6 2 2 6 2 2 6 18 18 18
55559- 82 82 82 34 34 34 10 10 10 0 0 0
55560- 0 0 0 0 0 0 0 0 0 0 0 0
55561- 0 0 0 0 0 0 0 0 0 0 0 0
55562- 0 0 0 0 0 0 0 0 0 0 0 0
55563- 0 0 0 0 0 0 0 0 0 0 0 0
55564- 0 0 0 0 0 0 0 0 0 0 0 0
55565- 0 0 0 0 0 0 0 0 0 0 0 0
55566- 0 0 0 0 0 0 0 0 0 0 0 0
55567- 0 0 1 0 0 0 0 0 0 0 0 0
55568- 0 0 0 0 0 0 0 0 0 0 0 0
55569- 0 0 0 0 0 0 0 0 0 0 0 0
55570- 0 0 0 0 0 0 0 0 0 0 0 0
55571- 0 0 0 0 0 0 0 0 0 0 0 0
55572- 0 0 0 0 0 0 0 0 0 14 14 14
55573- 46 46 46 86 86 86 2 2 6 2 2 6
55574- 6 6 6 6 6 6 22 22 22 34 34 34
55575- 6 6 6 2 2 6 2 2 6 2 2 6
55576- 2 2 6 2 2 6 18 18 18 34 34 34
55577- 10 10 10 50 50 50 22 22 22 2 2 6
55578- 2 2 6 2 2 6 2 2 6 10 10 10
55579- 86 86 86 42 42 42 14 14 14 0 0 0
55580- 0 0 0 0 0 0 0 0 0 0 0 0
55581- 0 0 0 0 0 0 0 0 0 0 0 0
55582- 0 0 0 0 0 0 0 0 0 0 0 0
55583- 0 0 0 0 0 0 0 0 0 0 0 0
55584- 0 0 0 0 0 0 0 0 0 0 0 0
55585- 0 0 0 0 0 0 0 0 0 0 0 0
55586- 0 0 0 0 0 0 0 0 0 0 0 0
55587- 0 0 1 0 0 1 0 0 1 0 0 0
55588- 0 0 0 0 0 0 0 0 0 0 0 0
55589- 0 0 0 0 0 0 0 0 0 0 0 0
55590- 0 0 0 0 0 0 0 0 0 0 0 0
55591- 0 0 0 0 0 0 0 0 0 0 0 0
55592- 0 0 0 0 0 0 0 0 0 14 14 14
55593- 46 46 46 86 86 86 2 2 6 2 2 6
55594- 38 38 38 116 116 116 94 94 94 22 22 22
55595- 22 22 22 2 2 6 2 2 6 2 2 6
55596- 14 14 14 86 86 86 138 138 138 162 162 162
55597-154 154 154 38 38 38 26 26 26 6 6 6
55598- 2 2 6 2 2 6 2 2 6 2 2 6
55599- 86 86 86 46 46 46 14 14 14 0 0 0
55600- 0 0 0 0 0 0 0 0 0 0 0 0
55601- 0 0 0 0 0 0 0 0 0 0 0 0
55602- 0 0 0 0 0 0 0 0 0 0 0 0
55603- 0 0 0 0 0 0 0 0 0 0 0 0
55604- 0 0 0 0 0 0 0 0 0 0 0 0
55605- 0 0 0 0 0 0 0 0 0 0 0 0
55606- 0 0 0 0 0 0 0 0 0 0 0 0
55607- 0 0 0 0 0 0 0 0 0 0 0 0
55608- 0 0 0 0 0 0 0 0 0 0 0 0
55609- 0 0 0 0 0 0 0 0 0 0 0 0
55610- 0 0 0 0 0 0 0 0 0 0 0 0
55611- 0 0 0 0 0 0 0 0 0 0 0 0
55612- 0 0 0 0 0 0 0 0 0 14 14 14
55613- 46 46 46 86 86 86 2 2 6 14 14 14
55614-134 134 134 198 198 198 195 195 195 116 116 116
55615- 10 10 10 2 2 6 2 2 6 6 6 6
55616-101 98 89 187 187 187 210 210 210 218 218 218
55617-214 214 214 134 134 134 14 14 14 6 6 6
55618- 2 2 6 2 2 6 2 2 6 2 2 6
55619- 86 86 86 50 50 50 18 18 18 6 6 6
55620- 0 0 0 0 0 0 0 0 0 0 0 0
55621- 0 0 0 0 0 0 0 0 0 0 0 0
55622- 0 0 0 0 0 0 0 0 0 0 0 0
55623- 0 0 0 0 0 0 0 0 0 0 0 0
55624- 0 0 0 0 0 0 0 0 0 0 0 0
55625- 0 0 0 0 0 0 0 0 0 0 0 0
55626- 0 0 0 0 0 0 0 0 1 0 0 0
55627- 0 0 1 0 0 1 0 0 1 0 0 0
55628- 0 0 0 0 0 0 0 0 0 0 0 0
55629- 0 0 0 0 0 0 0 0 0 0 0 0
55630- 0 0 0 0 0 0 0 0 0 0 0 0
55631- 0 0 0 0 0 0 0 0 0 0 0 0
55632- 0 0 0 0 0 0 0 0 0 14 14 14
55633- 46 46 46 86 86 86 2 2 6 54 54 54
55634-218 218 218 195 195 195 226 226 226 246 246 246
55635- 58 58 58 2 2 6 2 2 6 30 30 30
55636-210 210 210 253 253 253 174 174 174 123 123 123
55637-221 221 221 234 234 234 74 74 74 2 2 6
55638- 2 2 6 2 2 6 2 2 6 2 2 6
55639- 70 70 70 58 58 58 22 22 22 6 6 6
55640- 0 0 0 0 0 0 0 0 0 0 0 0
55641- 0 0 0 0 0 0 0 0 0 0 0 0
55642- 0 0 0 0 0 0 0 0 0 0 0 0
55643- 0 0 0 0 0 0 0 0 0 0 0 0
55644- 0 0 0 0 0 0 0 0 0 0 0 0
55645- 0 0 0 0 0 0 0 0 0 0 0 0
55646- 0 0 0 0 0 0 0 0 0 0 0 0
55647- 0 0 0 0 0 0 0 0 0 0 0 0
55648- 0 0 0 0 0 0 0 0 0 0 0 0
55649- 0 0 0 0 0 0 0 0 0 0 0 0
55650- 0 0 0 0 0 0 0 0 0 0 0 0
55651- 0 0 0 0 0 0 0 0 0 0 0 0
55652- 0 0 0 0 0 0 0 0 0 14 14 14
55653- 46 46 46 82 82 82 2 2 6 106 106 106
55654-170 170 170 26 26 26 86 86 86 226 226 226
55655-123 123 123 10 10 10 14 14 14 46 46 46
55656-231 231 231 190 190 190 6 6 6 70 70 70
55657- 90 90 90 238 238 238 158 158 158 2 2 6
55658- 2 2 6 2 2 6 2 2 6 2 2 6
55659- 70 70 70 58 58 58 22 22 22 6 6 6
55660- 0 0 0 0 0 0 0 0 0 0 0 0
55661- 0 0 0 0 0 0 0 0 0 0 0 0
55662- 0 0 0 0 0 0 0 0 0 0 0 0
55663- 0 0 0 0 0 0 0 0 0 0 0 0
55664- 0 0 0 0 0 0 0 0 0 0 0 0
55665- 0 0 0 0 0 0 0 0 0 0 0 0
55666- 0 0 0 0 0 0 0 0 1 0 0 0
55667- 0 0 1 0 0 1 0 0 1 0 0 0
55668- 0 0 0 0 0 0 0 0 0 0 0 0
55669- 0 0 0 0 0 0 0 0 0 0 0 0
55670- 0 0 0 0 0 0 0 0 0 0 0 0
55671- 0 0 0 0 0 0 0 0 0 0 0 0
55672- 0 0 0 0 0 0 0 0 0 14 14 14
55673- 42 42 42 86 86 86 6 6 6 116 116 116
55674-106 106 106 6 6 6 70 70 70 149 149 149
55675-128 128 128 18 18 18 38 38 38 54 54 54
55676-221 221 221 106 106 106 2 2 6 14 14 14
55677- 46 46 46 190 190 190 198 198 198 2 2 6
55678- 2 2 6 2 2 6 2 2 6 2 2 6
55679- 74 74 74 62 62 62 22 22 22 6 6 6
55680- 0 0 0 0 0 0 0 0 0 0 0 0
55681- 0 0 0 0 0 0 0 0 0 0 0 0
55682- 0 0 0 0 0 0 0 0 0 0 0 0
55683- 0 0 0 0 0 0 0 0 0 0 0 0
55684- 0 0 0 0 0 0 0 0 0 0 0 0
55685- 0 0 0 0 0 0 0 0 0 0 0 0
55686- 0 0 0 0 0 0 0 0 1 0 0 0
55687- 0 0 1 0 0 0 0 0 1 0 0 0
55688- 0 0 0 0 0 0 0 0 0 0 0 0
55689- 0 0 0 0 0 0 0 0 0 0 0 0
55690- 0 0 0 0 0 0 0 0 0 0 0 0
55691- 0 0 0 0 0 0 0 0 0 0 0 0
55692- 0 0 0 0 0 0 0 0 0 14 14 14
55693- 42 42 42 94 94 94 14 14 14 101 101 101
55694-128 128 128 2 2 6 18 18 18 116 116 116
55695-118 98 46 121 92 8 121 92 8 98 78 10
55696-162 162 162 106 106 106 2 2 6 2 2 6
55697- 2 2 6 195 195 195 195 195 195 6 6 6
55698- 2 2 6 2 2 6 2 2 6 2 2 6
55699- 74 74 74 62 62 62 22 22 22 6 6 6
55700- 0 0 0 0 0 0 0 0 0 0 0 0
55701- 0 0 0 0 0 0 0 0 0 0 0 0
55702- 0 0 0 0 0 0 0 0 0 0 0 0
55703- 0 0 0 0 0 0 0 0 0 0 0 0
55704- 0 0 0 0 0 0 0 0 0 0 0 0
55705- 0 0 0 0 0 0 0 0 0 0 0 0
55706- 0 0 0 0 0 0 0 0 1 0 0 1
55707- 0 0 1 0 0 0 0 0 1 0 0 0
55708- 0 0 0 0 0 0 0 0 0 0 0 0
55709- 0 0 0 0 0 0 0 0 0 0 0 0
55710- 0 0 0 0 0 0 0 0 0 0 0 0
55711- 0 0 0 0 0 0 0 0 0 0 0 0
55712- 0 0 0 0 0 0 0 0 0 10 10 10
55713- 38 38 38 90 90 90 14 14 14 58 58 58
55714-210 210 210 26 26 26 54 38 6 154 114 10
55715-226 170 11 236 186 11 225 175 15 184 144 12
55716-215 174 15 175 146 61 37 26 9 2 2 6
55717- 70 70 70 246 246 246 138 138 138 2 2 6
55718- 2 2 6 2 2 6 2 2 6 2 2 6
55719- 70 70 70 66 66 66 26 26 26 6 6 6
55720- 0 0 0 0 0 0 0 0 0 0 0 0
55721- 0 0 0 0 0 0 0 0 0 0 0 0
55722- 0 0 0 0 0 0 0 0 0 0 0 0
55723- 0 0 0 0 0 0 0 0 0 0 0 0
55724- 0 0 0 0 0 0 0 0 0 0 0 0
55725- 0 0 0 0 0 0 0 0 0 0 0 0
55726- 0 0 0 0 0 0 0 0 0 0 0 0
55727- 0 0 0 0 0 0 0 0 0 0 0 0
55728- 0 0 0 0 0 0 0 0 0 0 0 0
55729- 0 0 0 0 0 0 0 0 0 0 0 0
55730- 0 0 0 0 0 0 0 0 0 0 0 0
55731- 0 0 0 0 0 0 0 0 0 0 0 0
55732- 0 0 0 0 0 0 0 0 0 10 10 10
55733- 38 38 38 86 86 86 14 14 14 10 10 10
55734-195 195 195 188 164 115 192 133 9 225 175 15
55735-239 182 13 234 190 10 232 195 16 232 200 30
55736-245 207 45 241 208 19 232 195 16 184 144 12
55737-218 194 134 211 206 186 42 42 42 2 2 6
55738- 2 2 6 2 2 6 2 2 6 2 2 6
55739- 50 50 50 74 74 74 30 30 30 6 6 6
55740- 0 0 0 0 0 0 0 0 0 0 0 0
55741- 0 0 0 0 0 0 0 0 0 0 0 0
55742- 0 0 0 0 0 0 0 0 0 0 0 0
55743- 0 0 0 0 0 0 0 0 0 0 0 0
55744- 0 0 0 0 0 0 0 0 0 0 0 0
55745- 0 0 0 0 0 0 0 0 0 0 0 0
55746- 0 0 0 0 0 0 0 0 0 0 0 0
55747- 0 0 0 0 0 0 0 0 0 0 0 0
55748- 0 0 0 0 0 0 0 0 0 0 0 0
55749- 0 0 0 0 0 0 0 0 0 0 0 0
55750- 0 0 0 0 0 0 0 0 0 0 0 0
55751- 0 0 0 0 0 0 0 0 0 0 0 0
55752- 0 0 0 0 0 0 0 0 0 10 10 10
55753- 34 34 34 86 86 86 14 14 14 2 2 6
55754-121 87 25 192 133 9 219 162 10 239 182 13
55755-236 186 11 232 195 16 241 208 19 244 214 54
55756-246 218 60 246 218 38 246 215 20 241 208 19
55757-241 208 19 226 184 13 121 87 25 2 2 6
55758- 2 2 6 2 2 6 2 2 6 2 2 6
55759- 50 50 50 82 82 82 34 34 34 10 10 10
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 0 0 0 0 0 0 0 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 0 0 0
55768- 0 0 0 0 0 0 0 0 0 0 0 0
55769- 0 0 0 0 0 0 0 0 0 0 0 0
55770- 0 0 0 0 0 0 0 0 0 0 0 0
55771- 0 0 0 0 0 0 0 0 0 0 0 0
55772- 0 0 0 0 0 0 0 0 0 10 10 10
55773- 34 34 34 82 82 82 30 30 30 61 42 6
55774-180 123 7 206 145 10 230 174 11 239 182 13
55775-234 190 10 238 202 15 241 208 19 246 218 74
55776-246 218 38 246 215 20 246 215 20 246 215 20
55777-226 184 13 215 174 15 184 144 12 6 6 6
55778- 2 2 6 2 2 6 2 2 6 2 2 6
55779- 26 26 26 94 94 94 42 42 42 14 14 14
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 0 0 0 0 0 0 0 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 0 0 0 0 0 0
55788- 0 0 0 0 0 0 0 0 0 0 0 0
55789- 0 0 0 0 0 0 0 0 0 0 0 0
55790- 0 0 0 0 0 0 0 0 0 0 0 0
55791- 0 0 0 0 0 0 0 0 0 0 0 0
55792- 0 0 0 0 0 0 0 0 0 10 10 10
55793- 30 30 30 78 78 78 50 50 50 104 69 6
55794-192 133 9 216 158 10 236 178 12 236 186 11
55795-232 195 16 241 208 19 244 214 54 245 215 43
55796-246 215 20 246 215 20 241 208 19 198 155 10
55797-200 144 11 216 158 10 156 118 10 2 2 6
55798- 2 2 6 2 2 6 2 2 6 2 2 6
55799- 6 6 6 90 90 90 54 54 54 18 18 18
55800- 6 6 6 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 0 0 0 0 0 0
55808- 0 0 0 0 0 0 0 0 0 0 0 0
55809- 0 0 0 0 0 0 0 0 0 0 0 0
55810- 0 0 0 0 0 0 0 0 0 0 0 0
55811- 0 0 0 0 0 0 0 0 0 0 0 0
55812- 0 0 0 0 0 0 0 0 0 10 10 10
55813- 30 30 30 78 78 78 46 46 46 22 22 22
55814-137 92 6 210 162 10 239 182 13 238 190 10
55815-238 202 15 241 208 19 246 215 20 246 215 20
55816-241 208 19 203 166 17 185 133 11 210 150 10
55817-216 158 10 210 150 10 102 78 10 2 2 6
55818- 6 6 6 54 54 54 14 14 14 2 2 6
55819- 2 2 6 62 62 62 74 74 74 30 30 30
55820- 10 10 10 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 0 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 0 0 0 0 0 0 0 0 0
55828- 0 0 0 0 0 0 0 0 0 0 0 0
55829- 0 0 0 0 0 0 0 0 0 0 0 0
55830- 0 0 0 0 0 0 0 0 0 0 0 0
55831- 0 0 0 0 0 0 0 0 0 0 0 0
55832- 0 0 0 0 0 0 0 0 0 10 10 10
55833- 34 34 34 78 78 78 50 50 50 6 6 6
55834- 94 70 30 139 102 15 190 146 13 226 184 13
55835-232 200 30 232 195 16 215 174 15 190 146 13
55836-168 122 10 192 133 9 210 150 10 213 154 11
55837-202 150 34 182 157 106 101 98 89 2 2 6
55838- 2 2 6 78 78 78 116 116 116 58 58 58
55839- 2 2 6 22 22 22 90 90 90 46 46 46
55840- 18 18 18 6 6 6 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 0 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 0 0 0 0 0 0 0 0 0 0 0 0
55848- 0 0 0 0 0 0 0 0 0 0 0 0
55849- 0 0 0 0 0 0 0 0 0 0 0 0
55850- 0 0 0 0 0 0 0 0 0 0 0 0
55851- 0 0 0 0 0 0 0 0 0 0 0 0
55852- 0 0 0 0 0 0 0 0 0 10 10 10
55853- 38 38 38 86 86 86 50 50 50 6 6 6
55854-128 128 128 174 154 114 156 107 11 168 122 10
55855-198 155 10 184 144 12 197 138 11 200 144 11
55856-206 145 10 206 145 10 197 138 11 188 164 115
55857-195 195 195 198 198 198 174 174 174 14 14 14
55858- 2 2 6 22 22 22 116 116 116 116 116 116
55859- 22 22 22 2 2 6 74 74 74 70 70 70
55860- 30 30 30 10 10 10 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 0 0 0 0 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 0 0 0
55867- 0 0 0 0 0 0 0 0 0 0 0 0
55868- 0 0 0 0 0 0 0 0 0 0 0 0
55869- 0 0 0 0 0 0 0 0 0 0 0 0
55870- 0 0 0 0 0 0 0 0 0 0 0 0
55871- 0 0 0 0 0 0 0 0 0 0 0 0
55872- 0 0 0 0 0 0 6 6 6 18 18 18
55873- 50 50 50 101 101 101 26 26 26 10 10 10
55874-138 138 138 190 190 190 174 154 114 156 107 11
55875-197 138 11 200 144 11 197 138 11 192 133 9
55876-180 123 7 190 142 34 190 178 144 187 187 187
55877-202 202 202 221 221 221 214 214 214 66 66 66
55878- 2 2 6 2 2 6 50 50 50 62 62 62
55879- 6 6 6 2 2 6 10 10 10 90 90 90
55880- 50 50 50 18 18 18 6 6 6 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 0 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 0 0 0
55887- 0 0 0 0 0 0 0 0 0 0 0 0
55888- 0 0 0 0 0 0 0 0 0 0 0 0
55889- 0 0 0 0 0 0 0 0 0 0 0 0
55890- 0 0 0 0 0 0 0 0 0 0 0 0
55891- 0 0 0 0 0 0 0 0 0 0 0 0
55892- 0 0 0 0 0 0 10 10 10 34 34 34
55893- 74 74 74 74 74 74 2 2 6 6 6 6
55894-144 144 144 198 198 198 190 190 190 178 166 146
55895-154 121 60 156 107 11 156 107 11 168 124 44
55896-174 154 114 187 187 187 190 190 190 210 210 210
55897-246 246 246 253 253 253 253 253 253 182 182 182
55898- 6 6 6 2 2 6 2 2 6 2 2 6
55899- 2 2 6 2 2 6 2 2 6 62 62 62
55900- 74 74 74 34 34 34 14 14 14 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 0 0 0 0 0 0 0 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 0 0 0 0 0 0
55907- 0 0 0 0 0 0 0 0 0 0 0 0
55908- 0 0 0 0 0 0 0 0 0 0 0 0
55909- 0 0 0 0 0 0 0 0 0 0 0 0
55910- 0 0 0 0 0 0 0 0 0 0 0 0
55911- 0 0 0 0 0 0 0 0 0 0 0 0
55912- 0 0 0 10 10 10 22 22 22 54 54 54
55913- 94 94 94 18 18 18 2 2 6 46 46 46
55914-234 234 234 221 221 221 190 190 190 190 190 190
55915-190 190 190 187 187 187 187 187 187 190 190 190
55916-190 190 190 195 195 195 214 214 214 242 242 242
55917-253 253 253 253 253 253 253 253 253 253 253 253
55918- 82 82 82 2 2 6 2 2 6 2 2 6
55919- 2 2 6 2 2 6 2 2 6 14 14 14
55920- 86 86 86 54 54 54 22 22 22 6 6 6
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 0 0 0 0 0 0 0 0 0
55927- 0 0 0 0 0 0 0 0 0 0 0 0
55928- 0 0 0 0 0 0 0 0 0 0 0 0
55929- 0 0 0 0 0 0 0 0 0 0 0 0
55930- 0 0 0 0 0 0 0 0 0 0 0 0
55931- 0 0 0 0 0 0 0 0 0 0 0 0
55932- 6 6 6 18 18 18 46 46 46 90 90 90
55933- 46 46 46 18 18 18 6 6 6 182 182 182
55934-253 253 253 246 246 246 206 206 206 190 190 190
55935-190 190 190 190 190 190 190 190 190 190 190 190
55936-206 206 206 231 231 231 250 250 250 253 253 253
55937-253 253 253 253 253 253 253 253 253 253 253 253
55938-202 202 202 14 14 14 2 2 6 2 2 6
55939- 2 2 6 2 2 6 2 2 6 2 2 6
55940- 42 42 42 86 86 86 42 42 42 18 18 18
55941- 6 6 6 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 0 0 0
55944- 0 0 0 0 0 0 0 0 0 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 0 0 0 0 0 0 0 0 0 0 0 0
55947- 0 0 0 0 0 0 0 0 0 0 0 0
55948- 0 0 0 0 0 0 0 0 0 0 0 0
55949- 0 0 0 0 0 0 0 0 0 0 0 0
55950- 0 0 0 0 0 0 0 0 0 0 0 0
55951- 0 0 0 0 0 0 0 0 0 6 6 6
55952- 14 14 14 38 38 38 74 74 74 66 66 66
55953- 2 2 6 6 6 6 90 90 90 250 250 250
55954-253 253 253 253 253 253 238 238 238 198 198 198
55955-190 190 190 190 190 190 195 195 195 221 221 221
55956-246 246 246 253 253 253 253 253 253 253 253 253
55957-253 253 253 253 253 253 253 253 253 253 253 253
55958-253 253 253 82 82 82 2 2 6 2 2 6
55959- 2 2 6 2 2 6 2 2 6 2 2 6
55960- 2 2 6 78 78 78 70 70 70 34 34 34
55961- 14 14 14 6 6 6 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 0 0 0 0 0 0 0 0 0 0 0 0
55967- 0 0 0 0 0 0 0 0 0 0 0 0
55968- 0 0 0 0 0 0 0 0 0 0 0 0
55969- 0 0 0 0 0 0 0 0 0 0 0 0
55970- 0 0 0 0 0 0 0 0 0 0 0 0
55971- 0 0 0 0 0 0 0 0 0 14 14 14
55972- 34 34 34 66 66 66 78 78 78 6 6 6
55973- 2 2 6 18 18 18 218 218 218 253 253 253
55974-253 253 253 253 253 253 253 253 253 246 246 246
55975-226 226 226 231 231 231 246 246 246 253 253 253
55976-253 253 253 253 253 253 253 253 253 253 253 253
55977-253 253 253 253 253 253 253 253 253 253 253 253
55978-253 253 253 178 178 178 2 2 6 2 2 6
55979- 2 2 6 2 2 6 2 2 6 2 2 6
55980- 2 2 6 18 18 18 90 90 90 62 62 62
55981- 30 30 30 10 10 10 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 0 0 0 0
55984- 0 0 0 0 0 0 0 0 0 0 0 0
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 0 0 0 0 0 0 0 0 0 0 0 0
55987- 0 0 0 0 0 0 0 0 0 0 0 0
55988- 0 0 0 0 0 0 0 0 0 0 0 0
55989- 0 0 0 0 0 0 0 0 0 0 0 0
55990- 0 0 0 0 0 0 0 0 0 0 0 0
55991- 0 0 0 0 0 0 10 10 10 26 26 26
55992- 58 58 58 90 90 90 18 18 18 2 2 6
55993- 2 2 6 110 110 110 253 253 253 253 253 253
55994-253 253 253 253 253 253 253 253 253 253 253 253
55995-250 250 250 253 253 253 253 253 253 253 253 253
55996-253 253 253 253 253 253 253 253 253 253 253 253
55997-253 253 253 253 253 253 253 253 253 253 253 253
55998-253 253 253 231 231 231 18 18 18 2 2 6
55999- 2 2 6 2 2 6 2 2 6 2 2 6
56000- 2 2 6 2 2 6 18 18 18 94 94 94
56001- 54 54 54 26 26 26 10 10 10 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 0 0 0 0
56004- 0 0 0 0 0 0 0 0 0 0 0 0
56005- 0 0 0 0 0 0 0 0 0 0 0 0
56006- 0 0 0 0 0 0 0 0 0 0 0 0
56007- 0 0 0 0 0 0 0 0 0 0 0 0
56008- 0 0 0 0 0 0 0 0 0 0 0 0
56009- 0 0 0 0 0 0 0 0 0 0 0 0
56010- 0 0 0 0 0 0 0 0 0 0 0 0
56011- 0 0 0 6 6 6 22 22 22 50 50 50
56012- 90 90 90 26 26 26 2 2 6 2 2 6
56013- 14 14 14 195 195 195 250 250 250 253 253 253
56014-253 253 253 253 253 253 253 253 253 253 253 253
56015-253 253 253 253 253 253 253 253 253 253 253 253
56016-253 253 253 253 253 253 253 253 253 253 253 253
56017-253 253 253 253 253 253 253 253 253 253 253 253
56018-250 250 250 242 242 242 54 54 54 2 2 6
56019- 2 2 6 2 2 6 2 2 6 2 2 6
56020- 2 2 6 2 2 6 2 2 6 38 38 38
56021- 86 86 86 50 50 50 22 22 22 6 6 6
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 0 0 0 0
56024- 0 0 0 0 0 0 0 0 0 0 0 0
56025- 0 0 0 0 0 0 0 0 0 0 0 0
56026- 0 0 0 0 0 0 0 0 0 0 0 0
56027- 0 0 0 0 0 0 0 0 0 0 0 0
56028- 0 0 0 0 0 0 0 0 0 0 0 0
56029- 0 0 0 0 0 0 0 0 0 0 0 0
56030- 0 0 0 0 0 0 0 0 0 0 0 0
56031- 6 6 6 14 14 14 38 38 38 82 82 82
56032- 34 34 34 2 2 6 2 2 6 2 2 6
56033- 42 42 42 195 195 195 246 246 246 253 253 253
56034-253 253 253 253 253 253 253 253 253 250 250 250
56035-242 242 242 242 242 242 250 250 250 253 253 253
56036-253 253 253 253 253 253 253 253 253 253 253 253
56037-253 253 253 250 250 250 246 246 246 238 238 238
56038-226 226 226 231 231 231 101 101 101 6 6 6
56039- 2 2 6 2 2 6 2 2 6 2 2 6
56040- 2 2 6 2 2 6 2 2 6 2 2 6
56041- 38 38 38 82 82 82 42 42 42 14 14 14
56042- 6 6 6 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 0 0 0 0 0 0
56046- 0 0 0 0 0 0 0 0 0 0 0 0
56047- 0 0 0 0 0 0 0 0 0 0 0 0
56048- 0 0 0 0 0 0 0 0 0 0 0 0
56049- 0 0 0 0 0 0 0 0 0 0 0 0
56050- 0 0 0 0 0 0 0 0 0 0 0 0
56051- 10 10 10 26 26 26 62 62 62 66 66 66
56052- 2 2 6 2 2 6 2 2 6 6 6 6
56053- 70 70 70 170 170 170 206 206 206 234 234 234
56054-246 246 246 250 250 250 250 250 250 238 238 238
56055-226 226 226 231 231 231 238 238 238 250 250 250
56056-250 250 250 250 250 250 246 246 246 231 231 231
56057-214 214 214 206 206 206 202 202 202 202 202 202
56058-198 198 198 202 202 202 182 182 182 18 18 18
56059- 2 2 6 2 2 6 2 2 6 2 2 6
56060- 2 2 6 2 2 6 2 2 6 2 2 6
56061- 2 2 6 62 62 62 66 66 66 30 30 30
56062- 10 10 10 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 0 0 0 0 0 0 0 0 0
56066- 0 0 0 0 0 0 0 0 0 0 0 0
56067- 0 0 0 0 0 0 0 0 0 0 0 0
56068- 0 0 0 0 0 0 0 0 0 0 0 0
56069- 0 0 0 0 0 0 0 0 0 0 0 0
56070- 0 0 0 0 0 0 0 0 0 0 0 0
56071- 14 14 14 42 42 42 82 82 82 18 18 18
56072- 2 2 6 2 2 6 2 2 6 10 10 10
56073- 94 94 94 182 182 182 218 218 218 242 242 242
56074-250 250 250 253 253 253 253 253 253 250 250 250
56075-234 234 234 253 253 253 253 253 253 253 253 253
56076-253 253 253 253 253 253 253 253 253 246 246 246
56077-238 238 238 226 226 226 210 210 210 202 202 202
56078-195 195 195 195 195 195 210 210 210 158 158 158
56079- 6 6 6 14 14 14 50 50 50 14 14 14
56080- 2 2 6 2 2 6 2 2 6 2 2 6
56081- 2 2 6 6 6 6 86 86 86 46 46 46
56082- 18 18 18 6 6 6 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 0 0 0 0 0 0 0 0 0
56086- 0 0 0 0 0 0 0 0 0 0 0 0
56087- 0 0 0 0 0 0 0 0 0 0 0 0
56088- 0 0 0 0 0 0 0 0 0 0 0 0
56089- 0 0 0 0 0 0 0 0 0 0 0 0
56090- 0 0 0 0 0 0 0 0 0 6 6 6
56091- 22 22 22 54 54 54 70 70 70 2 2 6
56092- 2 2 6 10 10 10 2 2 6 22 22 22
56093-166 166 166 231 231 231 250 250 250 253 253 253
56094-253 253 253 253 253 253 253 253 253 250 250 250
56095-242 242 242 253 253 253 253 253 253 253 253 253
56096-253 253 253 253 253 253 253 253 253 253 253 253
56097-253 253 253 253 253 253 253 253 253 246 246 246
56098-231 231 231 206 206 206 198 198 198 226 226 226
56099- 94 94 94 2 2 6 6 6 6 38 38 38
56100- 30 30 30 2 2 6 2 2 6 2 2 6
56101- 2 2 6 2 2 6 62 62 62 66 66 66
56102- 26 26 26 10 10 10 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 0 0 0 0 0 0 0 0 0 0 0 0
56106- 0 0 0 0 0 0 0 0 0 0 0 0
56107- 0 0 0 0 0 0 0 0 0 0 0 0
56108- 0 0 0 0 0 0 0 0 0 0 0 0
56109- 0 0 0 0 0 0 0 0 0 0 0 0
56110- 0 0 0 0 0 0 0 0 0 10 10 10
56111- 30 30 30 74 74 74 50 50 50 2 2 6
56112- 26 26 26 26 26 26 2 2 6 106 106 106
56113-238 238 238 253 253 253 253 253 253 253 253 253
56114-253 253 253 253 253 253 253 253 253 253 253 253
56115-253 253 253 253 253 253 253 253 253 253 253 253
56116-253 253 253 253 253 253 253 253 253 253 253 253
56117-253 253 253 253 253 253 253 253 253 253 253 253
56118-253 253 253 246 246 246 218 218 218 202 202 202
56119-210 210 210 14 14 14 2 2 6 2 2 6
56120- 30 30 30 22 22 22 2 2 6 2 2 6
56121- 2 2 6 2 2 6 18 18 18 86 86 86
56122- 42 42 42 14 14 14 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 0 0 0 0 0 0 0 0 0 0 0 0
56126- 0 0 0 0 0 0 0 0 0 0 0 0
56127- 0 0 0 0 0 0 0 0 0 0 0 0
56128- 0 0 0 0 0 0 0 0 0 0 0 0
56129- 0 0 0 0 0 0 0 0 0 0 0 0
56130- 0 0 0 0 0 0 0 0 0 14 14 14
56131- 42 42 42 90 90 90 22 22 22 2 2 6
56132- 42 42 42 2 2 6 18 18 18 218 218 218
56133-253 253 253 253 253 253 253 253 253 253 253 253
56134-253 253 253 253 253 253 253 253 253 253 253 253
56135-253 253 253 253 253 253 253 253 253 253 253 253
56136-253 253 253 253 253 253 253 253 253 253 253 253
56137-253 253 253 253 253 253 253 253 253 253 253 253
56138-253 253 253 253 253 253 250 250 250 221 221 221
56139-218 218 218 101 101 101 2 2 6 14 14 14
56140- 18 18 18 38 38 38 10 10 10 2 2 6
56141- 2 2 6 2 2 6 2 2 6 78 78 78
56142- 58 58 58 22 22 22 6 6 6 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 0 0 0
56145- 0 0 0 0 0 0 0 0 0 0 0 0
56146- 0 0 0 0 0 0 0 0 0 0 0 0
56147- 0 0 0 0 0 0 0 0 0 0 0 0
56148- 0 0 0 0 0 0 0 0 0 0 0 0
56149- 0 0 0 0 0 0 0 0 0 0 0 0
56150- 0 0 0 0 0 0 6 6 6 18 18 18
56151- 54 54 54 82 82 82 2 2 6 26 26 26
56152- 22 22 22 2 2 6 123 123 123 253 253 253
56153-253 253 253 253 253 253 253 253 253 253 253 253
56154-253 253 253 253 253 253 253 253 253 253 253 253
56155-253 253 253 253 253 253 253 253 253 253 253 253
56156-253 253 253 253 253 253 253 253 253 253 253 253
56157-253 253 253 253 253 253 253 253 253 253 253 253
56158-253 253 253 253 253 253 253 253 253 250 250 250
56159-238 238 238 198 198 198 6 6 6 38 38 38
56160- 58 58 58 26 26 26 38 38 38 2 2 6
56161- 2 2 6 2 2 6 2 2 6 46 46 46
56162- 78 78 78 30 30 30 10 10 10 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 0 0 0
56165- 0 0 0 0 0 0 0 0 0 0 0 0
56166- 0 0 0 0 0 0 0 0 0 0 0 0
56167- 0 0 0 0 0 0 0 0 0 0 0 0
56168- 0 0 0 0 0 0 0 0 0 0 0 0
56169- 0 0 0 0 0 0 0 0 0 0 0 0
56170- 0 0 0 0 0 0 10 10 10 30 30 30
56171- 74 74 74 58 58 58 2 2 6 42 42 42
56172- 2 2 6 22 22 22 231 231 231 253 253 253
56173-253 253 253 253 253 253 253 253 253 253 253 253
56174-253 253 253 253 253 253 253 253 253 250 250 250
56175-253 253 253 253 253 253 253 253 253 253 253 253
56176-253 253 253 253 253 253 253 253 253 253 253 253
56177-253 253 253 253 253 253 253 253 253 253 253 253
56178-253 253 253 253 253 253 253 253 253 253 253 253
56179-253 253 253 246 246 246 46 46 46 38 38 38
56180- 42 42 42 14 14 14 38 38 38 14 14 14
56181- 2 2 6 2 2 6 2 2 6 6 6 6
56182- 86 86 86 46 46 46 14 14 14 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 0 0 0
56185- 0 0 0 0 0 0 0 0 0 0 0 0
56186- 0 0 0 0 0 0 0 0 0 0 0 0
56187- 0 0 0 0 0 0 0 0 0 0 0 0
56188- 0 0 0 0 0 0 0 0 0 0 0 0
56189- 0 0 0 0 0 0 0 0 0 0 0 0
56190- 0 0 0 6 6 6 14 14 14 42 42 42
56191- 90 90 90 18 18 18 18 18 18 26 26 26
56192- 2 2 6 116 116 116 253 253 253 253 253 253
56193-253 253 253 253 253 253 253 253 253 253 253 253
56194-253 253 253 253 253 253 250 250 250 238 238 238
56195-253 253 253 253 253 253 253 253 253 253 253 253
56196-253 253 253 253 253 253 253 253 253 253 253 253
56197-253 253 253 253 253 253 253 253 253 253 253 253
56198-253 253 253 253 253 253 253 253 253 253 253 253
56199-253 253 253 253 253 253 94 94 94 6 6 6
56200- 2 2 6 2 2 6 10 10 10 34 34 34
56201- 2 2 6 2 2 6 2 2 6 2 2 6
56202- 74 74 74 58 58 58 22 22 22 6 6 6
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 0 0 0 0 0 0
56205- 0 0 0 0 0 0 0 0 0 0 0 0
56206- 0 0 0 0 0 0 0 0 0 0 0 0
56207- 0 0 0 0 0 0 0 0 0 0 0 0
56208- 0 0 0 0 0 0 0 0 0 0 0 0
56209- 0 0 0 0 0 0 0 0 0 0 0 0
56210- 0 0 0 10 10 10 26 26 26 66 66 66
56211- 82 82 82 2 2 6 38 38 38 6 6 6
56212- 14 14 14 210 210 210 253 253 253 253 253 253
56213-253 253 253 253 253 253 253 253 253 253 253 253
56214-253 253 253 253 253 253 246 246 246 242 242 242
56215-253 253 253 253 253 253 253 253 253 253 253 253
56216-253 253 253 253 253 253 253 253 253 253 253 253
56217-253 253 253 253 253 253 253 253 253 253 253 253
56218-253 253 253 253 253 253 253 253 253 253 253 253
56219-253 253 253 253 253 253 144 144 144 2 2 6
56220- 2 2 6 2 2 6 2 2 6 46 46 46
56221- 2 2 6 2 2 6 2 2 6 2 2 6
56222- 42 42 42 74 74 74 30 30 30 10 10 10
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 0 0 0 0 0 0
56225- 0 0 0 0 0 0 0 0 0 0 0 0
56226- 0 0 0 0 0 0 0 0 0 0 0 0
56227- 0 0 0 0 0 0 0 0 0 0 0 0
56228- 0 0 0 0 0 0 0 0 0 0 0 0
56229- 0 0 0 0 0 0 0 0 0 0 0 0
56230- 6 6 6 14 14 14 42 42 42 90 90 90
56231- 26 26 26 6 6 6 42 42 42 2 2 6
56232- 74 74 74 250 250 250 253 253 253 253 253 253
56233-253 253 253 253 253 253 253 253 253 253 253 253
56234-253 253 253 253 253 253 242 242 242 242 242 242
56235-253 253 253 253 253 253 253 253 253 253 253 253
56236-253 253 253 253 253 253 253 253 253 253 253 253
56237-253 253 253 253 253 253 253 253 253 253 253 253
56238-253 253 253 253 253 253 253 253 253 253 253 253
56239-253 253 253 253 253 253 182 182 182 2 2 6
56240- 2 2 6 2 2 6 2 2 6 46 46 46
56241- 2 2 6 2 2 6 2 2 6 2 2 6
56242- 10 10 10 86 86 86 38 38 38 10 10 10
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 0 0 0 0 0 0
56245- 0 0 0 0 0 0 0 0 0 0 0 0
56246- 0 0 0 0 0 0 0 0 0 0 0 0
56247- 0 0 0 0 0 0 0 0 0 0 0 0
56248- 0 0 0 0 0 0 0 0 0 0 0 0
56249- 0 0 0 0 0 0 0 0 0 0 0 0
56250- 10 10 10 26 26 26 66 66 66 82 82 82
56251- 2 2 6 22 22 22 18 18 18 2 2 6
56252-149 149 149 253 253 253 253 253 253 253 253 253
56253-253 253 253 253 253 253 253 253 253 253 253 253
56254-253 253 253 253 253 253 234 234 234 242 242 242
56255-253 253 253 253 253 253 253 253 253 253 253 253
56256-253 253 253 253 253 253 253 253 253 253 253 253
56257-253 253 253 253 253 253 253 253 253 253 253 253
56258-253 253 253 253 253 253 253 253 253 253 253 253
56259-253 253 253 253 253 253 206 206 206 2 2 6
56260- 2 2 6 2 2 6 2 2 6 38 38 38
56261- 2 2 6 2 2 6 2 2 6 2 2 6
56262- 6 6 6 86 86 86 46 46 46 14 14 14
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 0 0 0 0 0 0
56265- 0 0 0 0 0 0 0 0 0 0 0 0
56266- 0 0 0 0 0 0 0 0 0 0 0 0
56267- 0 0 0 0 0 0 0 0 0 0 0 0
56268- 0 0 0 0 0 0 0 0 0 0 0 0
56269- 0 0 0 0 0 0 0 0 0 6 6 6
56270- 18 18 18 46 46 46 86 86 86 18 18 18
56271- 2 2 6 34 34 34 10 10 10 6 6 6
56272-210 210 210 253 253 253 253 253 253 253 253 253
56273-253 253 253 253 253 253 253 253 253 253 253 253
56274-253 253 253 253 253 253 234 234 234 242 242 242
56275-253 253 253 253 253 253 253 253 253 253 253 253
56276-253 253 253 253 253 253 253 253 253 253 253 253
56277-253 253 253 253 253 253 253 253 253 253 253 253
56278-253 253 253 253 253 253 253 253 253 253 253 253
56279-253 253 253 253 253 253 221 221 221 6 6 6
56280- 2 2 6 2 2 6 6 6 6 30 30 30
56281- 2 2 6 2 2 6 2 2 6 2 2 6
56282- 2 2 6 82 82 82 54 54 54 18 18 18
56283- 6 6 6 0 0 0 0 0 0 0 0 0
56284- 0 0 0 0 0 0 0 0 0 0 0 0
56285- 0 0 0 0 0 0 0 0 0 0 0 0
56286- 0 0 0 0 0 0 0 0 0 0 0 0
56287- 0 0 0 0 0 0 0 0 0 0 0 0
56288- 0 0 0 0 0 0 0 0 0 0 0 0
56289- 0 0 0 0 0 0 0 0 0 10 10 10
56290- 26 26 26 66 66 66 62 62 62 2 2 6
56291- 2 2 6 38 38 38 10 10 10 26 26 26
56292-238 238 238 253 253 253 253 253 253 253 253 253
56293-253 253 253 253 253 253 253 253 253 253 253 253
56294-253 253 253 253 253 253 231 231 231 238 238 238
56295-253 253 253 253 253 253 253 253 253 253 253 253
56296-253 253 253 253 253 253 253 253 253 253 253 253
56297-253 253 253 253 253 253 253 253 253 253 253 253
56298-253 253 253 253 253 253 253 253 253 253 253 253
56299-253 253 253 253 253 253 231 231 231 6 6 6
56300- 2 2 6 2 2 6 10 10 10 30 30 30
56301- 2 2 6 2 2 6 2 2 6 2 2 6
56302- 2 2 6 66 66 66 58 58 58 22 22 22
56303- 6 6 6 0 0 0 0 0 0 0 0 0
56304- 0 0 0 0 0 0 0 0 0 0 0 0
56305- 0 0 0 0 0 0 0 0 0 0 0 0
56306- 0 0 0 0 0 0 0 0 0 0 0 0
56307- 0 0 0 0 0 0 0 0 0 0 0 0
56308- 0 0 0 0 0 0 0 0 0 0 0 0
56309- 0 0 0 0 0 0 0 0 0 10 10 10
56310- 38 38 38 78 78 78 6 6 6 2 2 6
56311- 2 2 6 46 46 46 14 14 14 42 42 42
56312-246 246 246 253 253 253 253 253 253 253 253 253
56313-253 253 253 253 253 253 253 253 253 253 253 253
56314-253 253 253 253 253 253 231 231 231 242 242 242
56315-253 253 253 253 253 253 253 253 253 253 253 253
56316-253 253 253 253 253 253 253 253 253 253 253 253
56317-253 253 253 253 253 253 253 253 253 253 253 253
56318-253 253 253 253 253 253 253 253 253 253 253 253
56319-253 253 253 253 253 253 234 234 234 10 10 10
56320- 2 2 6 2 2 6 22 22 22 14 14 14
56321- 2 2 6 2 2 6 2 2 6 2 2 6
56322- 2 2 6 66 66 66 62 62 62 22 22 22
56323- 6 6 6 0 0 0 0 0 0 0 0 0
56324- 0 0 0 0 0 0 0 0 0 0 0 0
56325- 0 0 0 0 0 0 0 0 0 0 0 0
56326- 0 0 0 0 0 0 0 0 0 0 0 0
56327- 0 0 0 0 0 0 0 0 0 0 0 0
56328- 0 0 0 0 0 0 0 0 0 0 0 0
56329- 0 0 0 0 0 0 6 6 6 18 18 18
56330- 50 50 50 74 74 74 2 2 6 2 2 6
56331- 14 14 14 70 70 70 34 34 34 62 62 62
56332-250 250 250 253 253 253 253 253 253 253 253 253
56333-253 253 253 253 253 253 253 253 253 253 253 253
56334-253 253 253 253 253 253 231 231 231 246 246 246
56335-253 253 253 253 253 253 253 253 253 253 253 253
56336-253 253 253 253 253 253 253 253 253 253 253 253
56337-253 253 253 253 253 253 253 253 253 253 253 253
56338-253 253 253 253 253 253 253 253 253 253 253 253
56339-253 253 253 253 253 253 234 234 234 14 14 14
56340- 2 2 6 2 2 6 30 30 30 2 2 6
56341- 2 2 6 2 2 6 2 2 6 2 2 6
56342- 2 2 6 66 66 66 62 62 62 22 22 22
56343- 6 6 6 0 0 0 0 0 0 0 0 0
56344- 0 0 0 0 0 0 0 0 0 0 0 0
56345- 0 0 0 0 0 0 0 0 0 0 0 0
56346- 0 0 0 0 0 0 0 0 0 0 0 0
56347- 0 0 0 0 0 0 0 0 0 0 0 0
56348- 0 0 0 0 0 0 0 0 0 0 0 0
56349- 0 0 0 0 0 0 6 6 6 18 18 18
56350- 54 54 54 62 62 62 2 2 6 2 2 6
56351- 2 2 6 30 30 30 46 46 46 70 70 70
56352-250 250 250 253 253 253 253 253 253 253 253 253
56353-253 253 253 253 253 253 253 253 253 253 253 253
56354-253 253 253 253 253 253 231 231 231 246 246 246
56355-253 253 253 253 253 253 253 253 253 253 253 253
56356-253 253 253 253 253 253 253 253 253 253 253 253
56357-253 253 253 253 253 253 253 253 253 253 253 253
56358-253 253 253 253 253 253 253 253 253 253 253 253
56359-253 253 253 253 253 253 226 226 226 10 10 10
56360- 2 2 6 6 6 6 30 30 30 2 2 6
56361- 2 2 6 2 2 6 2 2 6 2 2 6
56362- 2 2 6 66 66 66 58 58 58 22 22 22
56363- 6 6 6 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 0 0 0 0 0 0 0 0 0 0 0 0
56367- 0 0 0 0 0 0 0 0 0 0 0 0
56368- 0 0 0 0 0 0 0 0 0 0 0 0
56369- 0 0 0 0 0 0 6 6 6 22 22 22
56370- 58 58 58 62 62 62 2 2 6 2 2 6
56371- 2 2 6 2 2 6 30 30 30 78 78 78
56372-250 250 250 253 253 253 253 253 253 253 253 253
56373-253 253 253 253 253 253 253 253 253 253 253 253
56374-253 253 253 253 253 253 231 231 231 246 246 246
56375-253 253 253 253 253 253 253 253 253 253 253 253
56376-253 253 253 253 253 253 253 253 253 253 253 253
56377-253 253 253 253 253 253 253 253 253 253 253 253
56378-253 253 253 253 253 253 253 253 253 253 253 253
56379-253 253 253 253 253 253 206 206 206 2 2 6
56380- 22 22 22 34 34 34 18 14 6 22 22 22
56381- 26 26 26 18 18 18 6 6 6 2 2 6
56382- 2 2 6 82 82 82 54 54 54 18 18 18
56383- 6 6 6 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 0 0 0 0 0 0 0 0 0 0 0 0
56387- 0 0 0 0 0 0 0 0 0 0 0 0
56388- 0 0 0 0 0 0 0 0 0 0 0 0
56389- 0 0 0 0 0 0 6 6 6 26 26 26
56390- 62 62 62 106 106 106 74 54 14 185 133 11
56391-210 162 10 121 92 8 6 6 6 62 62 62
56392-238 238 238 253 253 253 253 253 253 253 253 253
56393-253 253 253 253 253 253 253 253 253 253 253 253
56394-253 253 253 253 253 253 231 231 231 246 246 246
56395-253 253 253 253 253 253 253 253 253 253 253 253
56396-253 253 253 253 253 253 253 253 253 253 253 253
56397-253 253 253 253 253 253 253 253 253 253 253 253
56398-253 253 253 253 253 253 253 253 253 253 253 253
56399-253 253 253 253 253 253 158 158 158 18 18 18
56400- 14 14 14 2 2 6 2 2 6 2 2 6
56401- 6 6 6 18 18 18 66 66 66 38 38 38
56402- 6 6 6 94 94 94 50 50 50 18 18 18
56403- 6 6 6 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 0 0 0
56406- 0 0 0 0 0 0 0 0 0 0 0 0
56407- 0 0 0 0 0 0 0 0 0 0 0 0
56408- 0 0 0 0 0 0 0 0 0 6 6 6
56409- 10 10 10 10 10 10 18 18 18 38 38 38
56410- 78 78 78 142 134 106 216 158 10 242 186 14
56411-246 190 14 246 190 14 156 118 10 10 10 10
56412- 90 90 90 238 238 238 253 253 253 253 253 253
56413-253 253 253 253 253 253 253 253 253 253 253 253
56414-253 253 253 253 253 253 231 231 231 250 250 250
56415-253 253 253 253 253 253 253 253 253 253 253 253
56416-253 253 253 253 253 253 253 253 253 253 253 253
56417-253 253 253 253 253 253 253 253 253 253 253 253
56418-253 253 253 253 253 253 253 253 253 246 230 190
56419-238 204 91 238 204 91 181 142 44 37 26 9
56420- 2 2 6 2 2 6 2 2 6 2 2 6
56421- 2 2 6 2 2 6 38 38 38 46 46 46
56422- 26 26 26 106 106 106 54 54 54 18 18 18
56423- 6 6 6 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 0 0 0
56426- 0 0 0 0 0 0 0 0 0 0 0 0
56427- 0 0 0 0 0 0 0 0 0 0 0 0
56428- 0 0 0 6 6 6 14 14 14 22 22 22
56429- 30 30 30 38 38 38 50 50 50 70 70 70
56430-106 106 106 190 142 34 226 170 11 242 186 14
56431-246 190 14 246 190 14 246 190 14 154 114 10
56432- 6 6 6 74 74 74 226 226 226 253 253 253
56433-253 253 253 253 253 253 253 253 253 253 253 253
56434-253 253 253 253 253 253 231 231 231 250 250 250
56435-253 253 253 253 253 253 253 253 253 253 253 253
56436-253 253 253 253 253 253 253 253 253 253 253 253
56437-253 253 253 253 253 253 253 253 253 253 253 253
56438-253 253 253 253 253 253 253 253 253 228 184 62
56439-241 196 14 241 208 19 232 195 16 38 30 10
56440- 2 2 6 2 2 6 2 2 6 2 2 6
56441- 2 2 6 6 6 6 30 30 30 26 26 26
56442-203 166 17 154 142 90 66 66 66 26 26 26
56443- 6 6 6 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 0 0 0 0 0 0
56446- 0 0 0 0 0 0 0 0 0 0 0 0
56447- 0 0 0 0 0 0 0 0 0 0 0 0
56448- 6 6 6 18 18 18 38 38 38 58 58 58
56449- 78 78 78 86 86 86 101 101 101 123 123 123
56450-175 146 61 210 150 10 234 174 13 246 186 14
56451-246 190 14 246 190 14 246 190 14 238 190 10
56452-102 78 10 2 2 6 46 46 46 198 198 198
56453-253 253 253 253 253 253 253 253 253 253 253 253
56454-253 253 253 253 253 253 234 234 234 242 242 242
56455-253 253 253 253 253 253 253 253 253 253 253 253
56456-253 253 253 253 253 253 253 253 253 253 253 253
56457-253 253 253 253 253 253 253 253 253 253 253 253
56458-253 253 253 253 253 253 253 253 253 224 178 62
56459-242 186 14 241 196 14 210 166 10 22 18 6
56460- 2 2 6 2 2 6 2 2 6 2 2 6
56461- 2 2 6 2 2 6 6 6 6 121 92 8
56462-238 202 15 232 195 16 82 82 82 34 34 34
56463- 10 10 10 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 0 0 0 0 0 0 0 0 0
56466- 0 0 0 0 0 0 0 0 0 0 0 0
56467- 0 0 0 0 0 0 0 0 0 0 0 0
56468- 14 14 14 38 38 38 70 70 70 154 122 46
56469-190 142 34 200 144 11 197 138 11 197 138 11
56470-213 154 11 226 170 11 242 186 14 246 190 14
56471-246 190 14 246 190 14 246 190 14 246 190 14
56472-225 175 15 46 32 6 2 2 6 22 22 22
56473-158 158 158 250 250 250 253 253 253 253 253 253
56474-253 253 253 253 253 253 253 253 253 253 253 253
56475-253 253 253 253 253 253 253 253 253 253 253 253
56476-253 253 253 253 253 253 253 253 253 253 253 253
56477-253 253 253 253 253 253 253 253 253 253 253 253
56478-253 253 253 250 250 250 242 242 242 224 178 62
56479-239 182 13 236 186 11 213 154 11 46 32 6
56480- 2 2 6 2 2 6 2 2 6 2 2 6
56481- 2 2 6 2 2 6 61 42 6 225 175 15
56482-238 190 10 236 186 11 112 100 78 42 42 42
56483- 14 14 14 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 0 0 0 0 0 0 0 0 0 0 0 0
56486- 0 0 0 0 0 0 0 0 0 0 0 0
56487- 0 0 0 0 0 0 0 0 0 6 6 6
56488- 22 22 22 54 54 54 154 122 46 213 154 11
56489-226 170 11 230 174 11 226 170 11 226 170 11
56490-236 178 12 242 186 14 246 190 14 246 190 14
56491-246 190 14 246 190 14 246 190 14 246 190 14
56492-241 196 14 184 144 12 10 10 10 2 2 6
56493- 6 6 6 116 116 116 242 242 242 253 253 253
56494-253 253 253 253 253 253 253 253 253 253 253 253
56495-253 253 253 253 253 253 253 253 253 253 253 253
56496-253 253 253 253 253 253 253 253 253 253 253 253
56497-253 253 253 253 253 253 253 253 253 253 253 253
56498-253 253 253 231 231 231 198 198 198 214 170 54
56499-236 178 12 236 178 12 210 150 10 137 92 6
56500- 18 14 6 2 2 6 2 2 6 2 2 6
56501- 6 6 6 70 47 6 200 144 11 236 178 12
56502-239 182 13 239 182 13 124 112 88 58 58 58
56503- 22 22 22 6 6 6 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 0 0 0 0 0 0 0 0 0 0 0 0
56506- 0 0 0 0 0 0 0 0 0 0 0 0
56507- 0 0 0 0 0 0 0 0 0 10 10 10
56508- 30 30 30 70 70 70 180 133 36 226 170 11
56509-239 182 13 242 186 14 242 186 14 246 186 14
56510-246 190 14 246 190 14 246 190 14 246 190 14
56511-246 190 14 246 190 14 246 190 14 246 190 14
56512-246 190 14 232 195 16 98 70 6 2 2 6
56513- 2 2 6 2 2 6 66 66 66 221 221 221
56514-253 253 253 253 253 253 253 253 253 253 253 253
56515-253 253 253 253 253 253 253 253 253 253 253 253
56516-253 253 253 253 253 253 253 253 253 253 253 253
56517-253 253 253 253 253 253 253 253 253 253 253 253
56518-253 253 253 206 206 206 198 198 198 214 166 58
56519-230 174 11 230 174 11 216 158 10 192 133 9
56520-163 110 8 116 81 8 102 78 10 116 81 8
56521-167 114 7 197 138 11 226 170 11 239 182 13
56522-242 186 14 242 186 14 162 146 94 78 78 78
56523- 34 34 34 14 14 14 6 6 6 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 0 0 0 0 0 0 0 0 0 0 0 0
56526- 0 0 0 0 0 0 0 0 0 0 0 0
56527- 0 0 0 0 0 0 0 0 0 6 6 6
56528- 30 30 30 78 78 78 190 142 34 226 170 11
56529-239 182 13 246 190 14 246 190 14 246 190 14
56530-246 190 14 246 190 14 246 190 14 246 190 14
56531-246 190 14 246 190 14 246 190 14 246 190 14
56532-246 190 14 241 196 14 203 166 17 22 18 6
56533- 2 2 6 2 2 6 2 2 6 38 38 38
56534-218 218 218 253 253 253 253 253 253 253 253 253
56535-253 253 253 253 253 253 253 253 253 253 253 253
56536-253 253 253 253 253 253 253 253 253 253 253 253
56537-253 253 253 253 253 253 253 253 253 253 253 253
56538-250 250 250 206 206 206 198 198 198 202 162 69
56539-226 170 11 236 178 12 224 166 10 210 150 10
56540-200 144 11 197 138 11 192 133 9 197 138 11
56541-210 150 10 226 170 11 242 186 14 246 190 14
56542-246 190 14 246 186 14 225 175 15 124 112 88
56543- 62 62 62 30 30 30 14 14 14 6 6 6
56544- 0 0 0 0 0 0 0 0 0 0 0 0
56545- 0 0 0 0 0 0 0 0 0 0 0 0
56546- 0 0 0 0 0 0 0 0 0 0 0 0
56547- 0 0 0 0 0 0 0 0 0 10 10 10
56548- 30 30 30 78 78 78 174 135 50 224 166 10
56549-239 182 13 246 190 14 246 190 14 246 190 14
56550-246 190 14 246 190 14 246 190 14 246 190 14
56551-246 190 14 246 190 14 246 190 14 246 190 14
56552-246 190 14 246 190 14 241 196 14 139 102 15
56553- 2 2 6 2 2 6 2 2 6 2 2 6
56554- 78 78 78 250 250 250 253 253 253 253 253 253
56555-253 253 253 253 253 253 253 253 253 253 253 253
56556-253 253 253 253 253 253 253 253 253 253 253 253
56557-253 253 253 253 253 253 253 253 253 253 253 253
56558-250 250 250 214 214 214 198 198 198 190 150 46
56559-219 162 10 236 178 12 234 174 13 224 166 10
56560-216 158 10 213 154 11 213 154 11 216 158 10
56561-226 170 11 239 182 13 246 190 14 246 190 14
56562-246 190 14 246 190 14 242 186 14 206 162 42
56563-101 101 101 58 58 58 30 30 30 14 14 14
56564- 6 6 6 0 0 0 0 0 0 0 0 0
56565- 0 0 0 0 0 0 0 0 0 0 0 0
56566- 0 0 0 0 0 0 0 0 0 0 0 0
56567- 0 0 0 0 0 0 0 0 0 10 10 10
56568- 30 30 30 74 74 74 174 135 50 216 158 10
56569-236 178 12 246 190 14 246 190 14 246 190 14
56570-246 190 14 246 190 14 246 190 14 246 190 14
56571-246 190 14 246 190 14 246 190 14 246 190 14
56572-246 190 14 246 190 14 241 196 14 226 184 13
56573- 61 42 6 2 2 6 2 2 6 2 2 6
56574- 22 22 22 238 238 238 253 253 253 253 253 253
56575-253 253 253 253 253 253 253 253 253 253 253 253
56576-253 253 253 253 253 253 253 253 253 253 253 253
56577-253 253 253 253 253 253 253 253 253 253 253 253
56578-253 253 253 226 226 226 187 187 187 180 133 36
56579-216 158 10 236 178 12 239 182 13 236 178 12
56580-230 174 11 226 170 11 226 170 11 230 174 11
56581-236 178 12 242 186 14 246 190 14 246 190 14
56582-246 190 14 246 190 14 246 186 14 239 182 13
56583-206 162 42 106 106 106 66 66 66 34 34 34
56584- 14 14 14 6 6 6 0 0 0 0 0 0
56585- 0 0 0 0 0 0 0 0 0 0 0 0
56586- 0 0 0 0 0 0 0 0 0 0 0 0
56587- 0 0 0 0 0 0 0 0 0 6 6 6
56588- 26 26 26 70 70 70 163 133 67 213 154 11
56589-236 178 12 246 190 14 246 190 14 246 190 14
56590-246 190 14 246 190 14 246 190 14 246 190 14
56591-246 190 14 246 190 14 246 190 14 246 190 14
56592-246 190 14 246 190 14 246 190 14 241 196 14
56593-190 146 13 18 14 6 2 2 6 2 2 6
56594- 46 46 46 246 246 246 253 253 253 253 253 253
56595-253 253 253 253 253 253 253 253 253 253 253 253
56596-253 253 253 253 253 253 253 253 253 253 253 253
56597-253 253 253 253 253 253 253 253 253 253 253 253
56598-253 253 253 221 221 221 86 86 86 156 107 11
56599-216 158 10 236 178 12 242 186 14 246 186 14
56600-242 186 14 239 182 13 239 182 13 242 186 14
56601-242 186 14 246 186 14 246 190 14 246 190 14
56602-246 190 14 246 190 14 246 190 14 246 190 14
56603-242 186 14 225 175 15 142 122 72 66 66 66
56604- 30 30 30 10 10 10 0 0 0 0 0 0
56605- 0 0 0 0 0 0 0 0 0 0 0 0
56606- 0 0 0 0 0 0 0 0 0 0 0 0
56607- 0 0 0 0 0 0 0 0 0 6 6 6
56608- 26 26 26 70 70 70 163 133 67 210 150 10
56609-236 178 12 246 190 14 246 190 14 246 190 14
56610-246 190 14 246 190 14 246 190 14 246 190 14
56611-246 190 14 246 190 14 246 190 14 246 190 14
56612-246 190 14 246 190 14 246 190 14 246 190 14
56613-232 195 16 121 92 8 34 34 34 106 106 106
56614-221 221 221 253 253 253 253 253 253 253 253 253
56615-253 253 253 253 253 253 253 253 253 253 253 253
56616-253 253 253 253 253 253 253 253 253 253 253 253
56617-253 253 253 253 253 253 253 253 253 253 253 253
56618-242 242 242 82 82 82 18 14 6 163 110 8
56619-216 158 10 236 178 12 242 186 14 246 190 14
56620-246 190 14 246 190 14 246 190 14 246 190 14
56621-246 190 14 246 190 14 246 190 14 246 190 14
56622-246 190 14 246 190 14 246 190 14 246 190 14
56623-246 190 14 246 190 14 242 186 14 163 133 67
56624- 46 46 46 18 18 18 6 6 6 0 0 0
56625- 0 0 0 0 0 0 0 0 0 0 0 0
56626- 0 0 0 0 0 0 0 0 0 0 0 0
56627- 0 0 0 0 0 0 0 0 0 10 10 10
56628- 30 30 30 78 78 78 163 133 67 210 150 10
56629-236 178 12 246 186 14 246 190 14 246 190 14
56630-246 190 14 246 190 14 246 190 14 246 190 14
56631-246 190 14 246 190 14 246 190 14 246 190 14
56632-246 190 14 246 190 14 246 190 14 246 190 14
56633-241 196 14 215 174 15 190 178 144 253 253 253
56634-253 253 253 253 253 253 253 253 253 253 253 253
56635-253 253 253 253 253 253 253 253 253 253 253 253
56636-253 253 253 253 253 253 253 253 253 253 253 253
56637-253 253 253 253 253 253 253 253 253 218 218 218
56638- 58 58 58 2 2 6 22 18 6 167 114 7
56639-216 158 10 236 178 12 246 186 14 246 190 14
56640-246 190 14 246 190 14 246 190 14 246 190 14
56641-246 190 14 246 190 14 246 190 14 246 190 14
56642-246 190 14 246 190 14 246 190 14 246 190 14
56643-246 190 14 246 186 14 242 186 14 190 150 46
56644- 54 54 54 22 22 22 6 6 6 0 0 0
56645- 0 0 0 0 0 0 0 0 0 0 0 0
56646- 0 0 0 0 0 0 0 0 0 0 0 0
56647- 0 0 0 0 0 0 0 0 0 14 14 14
56648- 38 38 38 86 86 86 180 133 36 213 154 11
56649-236 178 12 246 186 14 246 190 14 246 190 14
56650-246 190 14 246 190 14 246 190 14 246 190 14
56651-246 190 14 246 190 14 246 190 14 246 190 14
56652-246 190 14 246 190 14 246 190 14 246 190 14
56653-246 190 14 232 195 16 190 146 13 214 214 214
56654-253 253 253 253 253 253 253 253 253 253 253 253
56655-253 253 253 253 253 253 253 253 253 253 253 253
56656-253 253 253 253 253 253 253 253 253 253 253 253
56657-253 253 253 250 250 250 170 170 170 26 26 26
56658- 2 2 6 2 2 6 37 26 9 163 110 8
56659-219 162 10 239 182 13 246 186 14 246 190 14
56660-246 190 14 246 190 14 246 190 14 246 190 14
56661-246 190 14 246 190 14 246 190 14 246 190 14
56662-246 190 14 246 190 14 246 190 14 246 190 14
56663-246 186 14 236 178 12 224 166 10 142 122 72
56664- 46 46 46 18 18 18 6 6 6 0 0 0
56665- 0 0 0 0 0 0 0 0 0 0 0 0
56666- 0 0 0 0 0 0 0 0 0 0 0 0
56667- 0 0 0 0 0 0 6 6 6 18 18 18
56668- 50 50 50 109 106 95 192 133 9 224 166 10
56669-242 186 14 246 190 14 246 190 14 246 190 14
56670-246 190 14 246 190 14 246 190 14 246 190 14
56671-246 190 14 246 190 14 246 190 14 246 190 14
56672-246 190 14 246 190 14 246 190 14 246 190 14
56673-242 186 14 226 184 13 210 162 10 142 110 46
56674-226 226 226 253 253 253 253 253 253 253 253 253
56675-253 253 253 253 253 253 253 253 253 253 253 253
56676-253 253 253 253 253 253 253 253 253 253 253 253
56677-198 198 198 66 66 66 2 2 6 2 2 6
56678- 2 2 6 2 2 6 50 34 6 156 107 11
56679-219 162 10 239 182 13 246 186 14 246 190 14
56680-246 190 14 246 190 14 246 190 14 246 190 14
56681-246 190 14 246 190 14 246 190 14 246 190 14
56682-246 190 14 246 190 14 246 190 14 242 186 14
56683-234 174 13 213 154 11 154 122 46 66 66 66
56684- 30 30 30 10 10 10 0 0 0 0 0 0
56685- 0 0 0 0 0 0 0 0 0 0 0 0
56686- 0 0 0 0 0 0 0 0 0 0 0 0
56687- 0 0 0 0 0 0 6 6 6 22 22 22
56688- 58 58 58 154 121 60 206 145 10 234 174 13
56689-242 186 14 246 186 14 246 190 14 246 190 14
56690-246 190 14 246 190 14 246 190 14 246 190 14
56691-246 190 14 246 190 14 246 190 14 246 190 14
56692-246 190 14 246 190 14 246 190 14 246 190 14
56693-246 186 14 236 178 12 210 162 10 163 110 8
56694- 61 42 6 138 138 138 218 218 218 250 250 250
56695-253 253 253 253 253 253 253 253 253 250 250 250
56696-242 242 242 210 210 210 144 144 144 66 66 66
56697- 6 6 6 2 2 6 2 2 6 2 2 6
56698- 2 2 6 2 2 6 61 42 6 163 110 8
56699-216 158 10 236 178 12 246 190 14 246 190 14
56700-246 190 14 246 190 14 246 190 14 246 190 14
56701-246 190 14 246 190 14 246 190 14 246 190 14
56702-246 190 14 239 182 13 230 174 11 216 158 10
56703-190 142 34 124 112 88 70 70 70 38 38 38
56704- 18 18 18 6 6 6 0 0 0 0 0 0
56705- 0 0 0 0 0 0 0 0 0 0 0 0
56706- 0 0 0 0 0 0 0 0 0 0 0 0
56707- 0 0 0 0 0 0 6 6 6 22 22 22
56708- 62 62 62 168 124 44 206 145 10 224 166 10
56709-236 178 12 239 182 13 242 186 14 242 186 14
56710-246 186 14 246 190 14 246 190 14 246 190 14
56711-246 190 14 246 190 14 246 190 14 246 190 14
56712-246 190 14 246 190 14 246 190 14 246 190 14
56713-246 190 14 236 178 12 216 158 10 175 118 6
56714- 80 54 7 2 2 6 6 6 6 30 30 30
56715- 54 54 54 62 62 62 50 50 50 38 38 38
56716- 14 14 14 2 2 6 2 2 6 2 2 6
56717- 2 2 6 2 2 6 2 2 6 2 2 6
56718- 2 2 6 6 6 6 80 54 7 167 114 7
56719-213 154 11 236 178 12 246 190 14 246 190 14
56720-246 190 14 246 190 14 246 190 14 246 190 14
56721-246 190 14 242 186 14 239 182 13 239 182 13
56722-230 174 11 210 150 10 174 135 50 124 112 88
56723- 82 82 82 54 54 54 34 34 34 18 18 18
56724- 6 6 6 0 0 0 0 0 0 0 0 0
56725- 0 0 0 0 0 0 0 0 0 0 0 0
56726- 0 0 0 0 0 0 0 0 0 0 0 0
56727- 0 0 0 0 0 0 6 6 6 18 18 18
56728- 50 50 50 158 118 36 192 133 9 200 144 11
56729-216 158 10 219 162 10 224 166 10 226 170 11
56730-230 174 11 236 178 12 239 182 13 239 182 13
56731-242 186 14 246 186 14 246 190 14 246 190 14
56732-246 190 14 246 190 14 246 190 14 246 190 14
56733-246 186 14 230 174 11 210 150 10 163 110 8
56734-104 69 6 10 10 10 2 2 6 2 2 6
56735- 2 2 6 2 2 6 2 2 6 2 2 6
56736- 2 2 6 2 2 6 2 2 6 2 2 6
56737- 2 2 6 2 2 6 2 2 6 2 2 6
56738- 2 2 6 6 6 6 91 60 6 167 114 7
56739-206 145 10 230 174 11 242 186 14 246 190 14
56740-246 190 14 246 190 14 246 186 14 242 186 14
56741-239 182 13 230 174 11 224 166 10 213 154 11
56742-180 133 36 124 112 88 86 86 86 58 58 58
56743- 38 38 38 22 22 22 10 10 10 6 6 6
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 0 0 0 0 0 0
56746- 0 0 0 0 0 0 0 0 0 0 0 0
56747- 0 0 0 0 0 0 0 0 0 14 14 14
56748- 34 34 34 70 70 70 138 110 50 158 118 36
56749-167 114 7 180 123 7 192 133 9 197 138 11
56750-200 144 11 206 145 10 213 154 11 219 162 10
56751-224 166 10 230 174 11 239 182 13 242 186 14
56752-246 186 14 246 186 14 246 186 14 246 186 14
56753-239 182 13 216 158 10 185 133 11 152 99 6
56754-104 69 6 18 14 6 2 2 6 2 2 6
56755- 2 2 6 2 2 6 2 2 6 2 2 6
56756- 2 2 6 2 2 6 2 2 6 2 2 6
56757- 2 2 6 2 2 6 2 2 6 2 2 6
56758- 2 2 6 6 6 6 80 54 7 152 99 6
56759-192 133 9 219 162 10 236 178 12 239 182 13
56760-246 186 14 242 186 14 239 182 13 236 178 12
56761-224 166 10 206 145 10 192 133 9 154 121 60
56762- 94 94 94 62 62 62 42 42 42 22 22 22
56763- 14 14 14 6 6 6 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 0 0 0 0 0 0
56766- 0 0 0 0 0 0 0 0 0 0 0 0
56767- 0 0 0 0 0 0 0 0 0 6 6 6
56768- 18 18 18 34 34 34 58 58 58 78 78 78
56769-101 98 89 124 112 88 142 110 46 156 107 11
56770-163 110 8 167 114 7 175 118 6 180 123 7
56771-185 133 11 197 138 11 210 150 10 219 162 10
56772-226 170 11 236 178 12 236 178 12 234 174 13
56773-219 162 10 197 138 11 163 110 8 130 83 6
56774- 91 60 6 10 10 10 2 2 6 2 2 6
56775- 18 18 18 38 38 38 38 38 38 38 38 38
56776- 38 38 38 38 38 38 38 38 38 38 38 38
56777- 38 38 38 38 38 38 26 26 26 2 2 6
56778- 2 2 6 6 6 6 70 47 6 137 92 6
56779-175 118 6 200 144 11 219 162 10 230 174 11
56780-234 174 13 230 174 11 219 162 10 210 150 10
56781-192 133 9 163 110 8 124 112 88 82 82 82
56782- 50 50 50 30 30 30 14 14 14 6 6 6
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 0 0 0 0 0 0 0
56786- 0 0 0 0 0 0 0 0 0 0 0 0
56787- 0 0 0 0 0 0 0 0 0 0 0 0
56788- 6 6 6 14 14 14 22 22 22 34 34 34
56789- 42 42 42 58 58 58 74 74 74 86 86 86
56790-101 98 89 122 102 70 130 98 46 121 87 25
56791-137 92 6 152 99 6 163 110 8 180 123 7
56792-185 133 11 197 138 11 206 145 10 200 144 11
56793-180 123 7 156 107 11 130 83 6 104 69 6
56794- 50 34 6 54 54 54 110 110 110 101 98 89
56795- 86 86 86 82 82 82 78 78 78 78 78 78
56796- 78 78 78 78 78 78 78 78 78 78 78 78
56797- 78 78 78 82 82 82 86 86 86 94 94 94
56798-106 106 106 101 101 101 86 66 34 124 80 6
56799-156 107 11 180 123 7 192 133 9 200 144 11
56800-206 145 10 200 144 11 192 133 9 175 118 6
56801-139 102 15 109 106 95 70 70 70 42 42 42
56802- 22 22 22 10 10 10 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 0 0 0 0 0 0 0 0 0 0
56806- 0 0 0 0 0 0 0 0 0 0 0 0
56807- 0 0 0 0 0 0 0 0 0 0 0 0
56808- 0 0 0 0 0 0 6 6 6 10 10 10
56809- 14 14 14 22 22 22 30 30 30 38 38 38
56810- 50 50 50 62 62 62 74 74 74 90 90 90
56811-101 98 89 112 100 78 121 87 25 124 80 6
56812-137 92 6 152 99 6 152 99 6 152 99 6
56813-138 86 6 124 80 6 98 70 6 86 66 30
56814-101 98 89 82 82 82 58 58 58 46 46 46
56815- 38 38 38 34 34 34 34 34 34 34 34 34
56816- 34 34 34 34 34 34 34 34 34 34 34 34
56817- 34 34 34 34 34 34 38 38 38 42 42 42
56818- 54 54 54 82 82 82 94 86 76 91 60 6
56819-134 86 6 156 107 11 167 114 7 175 118 6
56820-175 118 6 167 114 7 152 99 6 121 87 25
56821-101 98 89 62 62 62 34 34 34 18 18 18
56822- 6 6 6 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 0 0 0
56825- 0 0 0 0 0 0 0 0 0 0 0 0
56826- 0 0 0 0 0 0 0 0 0 0 0 0
56827- 0 0 0 0 0 0 0 0 0 0 0 0
56828- 0 0 0 0 0 0 0 0 0 0 0 0
56829- 0 0 0 6 6 6 6 6 6 10 10 10
56830- 18 18 18 22 22 22 30 30 30 42 42 42
56831- 50 50 50 66 66 66 86 86 86 101 98 89
56832-106 86 58 98 70 6 104 69 6 104 69 6
56833-104 69 6 91 60 6 82 62 34 90 90 90
56834- 62 62 62 38 38 38 22 22 22 14 14 14
56835- 10 10 10 10 10 10 10 10 10 10 10 10
56836- 10 10 10 10 10 10 6 6 6 10 10 10
56837- 10 10 10 10 10 10 10 10 10 14 14 14
56838- 22 22 22 42 42 42 70 70 70 89 81 66
56839- 80 54 7 104 69 6 124 80 6 137 92 6
56840-134 86 6 116 81 8 100 82 52 86 86 86
56841- 58 58 58 30 30 30 14 14 14 6 6 6
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 0 0 0
56845- 0 0 0 0 0 0 0 0 0 0 0 0
56846- 0 0 0 0 0 0 0 0 0 0 0 0
56847- 0 0 0 0 0 0 0 0 0 0 0 0
56848- 0 0 0 0 0 0 0 0 0 0 0 0
56849- 0 0 0 0 0 0 0 0 0 0 0 0
56850- 0 0 0 6 6 6 10 10 10 14 14 14
56851- 18 18 18 26 26 26 38 38 38 54 54 54
56852- 70 70 70 86 86 86 94 86 76 89 81 66
56853- 89 81 66 86 86 86 74 74 74 50 50 50
56854- 30 30 30 14 14 14 6 6 6 0 0 0
56855- 0 0 0 0 0 0 0 0 0 0 0 0
56856- 0 0 0 0 0 0 0 0 0 0 0 0
56857- 0 0 0 0 0 0 0 0 0 0 0 0
56858- 6 6 6 18 18 18 34 34 34 58 58 58
56859- 82 82 82 89 81 66 89 81 66 89 81 66
56860- 94 86 66 94 86 76 74 74 74 50 50 50
56861- 26 26 26 14 14 14 6 6 6 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 0 0 0
56865- 0 0 0 0 0 0 0 0 0 0 0 0
56866- 0 0 0 0 0 0 0 0 0 0 0 0
56867- 0 0 0 0 0 0 0 0 0 0 0 0
56868- 0 0 0 0 0 0 0 0 0 0 0 0
56869- 0 0 0 0 0 0 0 0 0 0 0 0
56870- 0 0 0 0 0 0 0 0 0 0 0 0
56871- 6 6 6 6 6 6 14 14 14 18 18 18
56872- 30 30 30 38 38 38 46 46 46 54 54 54
56873- 50 50 50 42 42 42 30 30 30 18 18 18
56874- 10 10 10 0 0 0 0 0 0 0 0 0
56875- 0 0 0 0 0 0 0 0 0 0 0 0
56876- 0 0 0 0 0 0 0 0 0 0 0 0
56877- 0 0 0 0 0 0 0 0 0 0 0 0
56878- 0 0 0 6 6 6 14 14 14 26 26 26
56879- 38 38 38 50 50 50 58 58 58 58 58 58
56880- 54 54 54 42 42 42 30 30 30 18 18 18
56881- 10 10 10 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 0 0 0
56885- 0 0 0 0 0 0 0 0 0 0 0 0
56886- 0 0 0 0 0 0 0 0 0 0 0 0
56887- 0 0 0 0 0 0 0 0 0 0 0 0
56888- 0 0 0 0 0 0 0 0 0 0 0 0
56889- 0 0 0 0 0 0 0 0 0 0 0 0
56890- 0 0 0 0 0 0 0 0 0 0 0 0
56891- 0 0 0 0 0 0 0 0 0 6 6 6
56892- 6 6 6 10 10 10 14 14 14 18 18 18
56893- 18 18 18 14 14 14 10 10 10 6 6 6
56894- 0 0 0 0 0 0 0 0 0 0 0 0
56895- 0 0 0 0 0 0 0 0 0 0 0 0
56896- 0 0 0 0 0 0 0 0 0 0 0 0
56897- 0 0 0 0 0 0 0 0 0 0 0 0
56898- 0 0 0 0 0 0 0 0 0 6 6 6
56899- 14 14 14 18 18 18 22 22 22 22 22 22
56900- 18 18 18 14 14 14 10 10 10 6 6 6
56901- 0 0 0 0 0 0 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 0 0 0
56905- 0 0 0 0 0 0 0 0 0 0 0 0
56906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56919+4 4 4 4 4 4
56920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56933+4 4 4 4 4 4
56934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56947+4 4 4 4 4 4
56948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56961+4 4 4 4 4 4
56962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
[ ... several hundred further lines of raw "R G B" pixel triplets omitted: this stretch of the patch embeds what appears to be PPM-format image data (a replacement boot logo), which carries no reviewable code or prose content ... ]
58026diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58027index fef20db..d28b1ab 100644
58028--- a/drivers/xen/xenfs/xenstored.c
58029+++ b/drivers/xen/xenfs/xenstored.c
58030@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58031 static int xsd_kva_open(struct inode *inode, struct file *file)
58032 {
58033 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58034+#ifdef CONFIG_GRKERNSEC_HIDESYM
58035+ NULL);
58036+#else
58037 xen_store_interface);
58038+#endif
58039+
58040 if (!file->private_data)
58041 return -ENOMEM;
58042 return 0;
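
The hunk above is the classic GRKERNSEC_HIDESYM pattern: with symbol hiding enabled, the "%p"-formatted string handed back to userspace is built from NULL rather than from the real xen_store_interface pointer, so reading this xenfs file no longer discloses a kernel virtual address. A minimal userspace sketch of the leak being closed (hypothetical standalone demo, not kernel code):

    #include <stdio.h>

    /* Hypothetical stand-in for a kernel object whose address would
     * otherwise reach userspace through a "%p"-formatted file. */
    static int secret_object;

    int main(void)
    {
        /* An address like the first line helps an attacker defeat
         * KASLR-style randomization; printing NULL reveals nothing. */
        printf("leaky:  %p\n", (void *)&secret_object);
        printf("hidden: %p\n", (void *)NULL);
        return 0;
    }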
58043diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58044index eb14e05..5156de7 100644
58045--- a/fs/9p/vfs_addr.c
58046+++ b/fs/9p/vfs_addr.c
58047@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58048
58049 retval = v9fs_file_write_internal(inode,
58050 v9inode->writeback_fid,
58051- (__force const char __user *)buffer,
58052+ (const char __force_user *)buffer,
58053 len, &offset, 0);
58054 if (retval > 0)
58055 retval = 0;
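
The 9p hunk swaps the stock (__force const char __user *) cast for grsecurity's combined (const char __force_user *) spelling; both silence sparse's address-space checker for a kernel buffer deliberately passed through a __user-typed write path. A sketch of how these annotations typically expand for sparse — __user and __force are the standard kernel definitions, the combined __force_user macro is modeled on this patch:

    /* Address-space annotations are only visible to the sparse checker;
     * they compile away entirely in a normal build. */
    #ifdef __CHECKER__
    # define __user        __attribute__((noderef, address_space(1)))
    # define __force       __attribute__((force))
    # define __force_user  __force __user   /* combined form used above */
    #else
    # define __user
    # define __force
    # define __force_user
    #endif

    /* A kernel-internal buffer then crosses into a __user-typed API
     * only through an explicit, greppable cast: */
    static inline const char __force_user *to_user_ptr(const char *p)
    {
        return (const char __force_user *)p;
    }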
58056diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58057index 9ee5343..5165e3c 100644
58058--- a/fs/9p/vfs_inode.c
58059+++ b/fs/9p/vfs_inode.c
58060@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58061 void
58062 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58063 {
58064- char *s = nd_get_link(nd);
58065+ const char *s = nd_get_link(nd);
58066
58067 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
58068 dentry, IS_ERR(s) ? "<error>" : s);
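
The vfs_inode.c hunk is a const-correctness fix: the link body returned by nd_get_link() is only read here, so typing it const char * lets the compiler reject any accidental write to a string the VFS may still be using. A trivial illustration with hypothetical names:

    #include <stdio.h>

    /* Taking the link body as const makes a stray s[0] = '\0' a
     * compile-time error rather than silent corruption. */
    static void put_link_demo(const char *s)
    {
        printf("releasing link body: %s\n", s);
    }

    int main(void)
    {
        put_link_demo("/target/of/symlink");
        return 0;
    }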
58069diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58070index c055d56e..a46f4f5 100644
58071--- a/fs/Kconfig.binfmt
58072+++ b/fs/Kconfig.binfmt
58073@@ -106,7 +106,7 @@ config HAVE_AOUT
58074
58075 config BINFMT_AOUT
58076 tristate "Kernel support for a.out and ECOFF binaries"
58077- depends on HAVE_AOUT
58078+ depends on HAVE_AOUT && BROKEN
58079 ---help---
58080 A.out (Assembler.OUTput) is a set of formats for libraries and
58081 executables used in the earliest versions of UNIX. Linux used
58082diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58083index 8a1d38e..300a14e 100644
58084--- a/fs/afs/inode.c
58085+++ b/fs/afs/inode.c
58086@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58087 struct afs_vnode *vnode;
58088 struct super_block *sb;
58089 struct inode *inode;
58090- static atomic_t afs_autocell_ino;
58091+ static atomic_unchecked_t afs_autocell_ino;
58092
58093 _enter("{%x:%u},%*.*s,",
58094 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58095@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58096 data.fid.unique = 0;
58097 data.fid.vnode = 0;
58098
58099- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58100+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58101 afs_iget5_autocell_test, afs_iget5_set,
58102 &data);
58103 if (!inode) {
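
The afs hunk converts the autocell inode counter to atomic_unchecked_t, PaX REFCOUNT's escape hatch for counters that are allowed to wrap: the value only synthesizes unique inode numbers and is never used as a reference count, so overflow is harmless and should not trip the overflow detector. A userspace sketch of the distinction — the _unchecked type and helper are modeled on the patch, with plain GCC atomics standing in for the kernel primitives:

    #include <stdio.h>

    /* Under PaX REFCOUNT a normal atomic_t would trap on overflow;
     * the "unchecked" flavor is explicitly permitted to wrap because
     * its value is used only as an identifier. */
    typedef struct { volatile int counter; } atomic_unchecked_demo_t;

    static int atomic_inc_return_unchecked_demo(atomic_unchecked_demo_t *v)
    {
        return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        static atomic_unchecked_demo_t ino = { 0x7fffffff }; /* INT_MAX */

        /* Wraps to INT_MIN: benign here, the result is just a tag. */
        printf("next autocell ino: %d\n",
               atomic_inc_return_unchecked_demo(&ino));
        return 0;
    }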
58104diff --git a/fs/aio.c b/fs/aio.c
58105index c428871..3f3041b 100644
58106--- a/fs/aio.c
58107+++ b/fs/aio.c
58108@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58109 size += sizeof(struct io_event) * nr_events;
58110
58111 nr_pages = PFN_UP(size);
58112- if (nr_pages < 0)
58113+ if (nr_pages <= 0)
58114 return -EINVAL;
58115
58116 file = aio_private_file(ctx, nr_pages);
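
The aio hunk tightens nr_pages < 0 to nr_pages <= 0. PFN_UP() rounds a byte count up to whole pages, so a degenerate or overflow-wrapped size can yield zero pages, which the old check waved through into aio_private_file(). A small demonstration of the failure mode, assuming the standard PFN_UP definition:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    /* Kernel-style rounding of a byte length up to whole pages. */
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long size = 0;        /* degenerate/wrapped request */
        int nr_pages = PFN_UP(size);

        /* "< 0" never fires for this value; "<= 0" rejects it. */
        if (nr_pages <= 0)
            printf("rejected: nr_pages=%d\n", nr_pages);
        return 0;
    }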
58117diff --git a/fs/attr.c b/fs/attr.c
58118index 6530ced..4a827e2 100644
58119--- a/fs/attr.c
58120+++ b/fs/attr.c
58121@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58122 unsigned long limit;
58123
58124 limit = rlimit(RLIMIT_FSIZE);
58125+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58126 if (limit != RLIM_INFINITY && offset > limit)
58127 goto out_sig;
58128 if (offset > inode->i_sb->s_maxbytes)
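
The attr.c hunk inserts gr_learn_resource() in front of the RLIMIT_FSIZE check, the pattern grsecurity uses throughout the patch: the RBAC learning mode observes the resource amount a task actually requested before enforcement happens, so it can later propose a tight-but-sufficient limit. A toy userspace analogue of that observe-then-enforce shape — the real hook logs per subject inside the RBAC engine, and the names here are illustrative:

    #include <stdio.h>

    static unsigned long learned_max;   /* stand-in for the learning log */

    static void learn_resource(unsigned long wanted)
    {
        if (wanted > learned_max)
            learned_max = wanted;
    }

    static int check_fsize(unsigned long offset, unsigned long limit)
    {
        learn_resource(offset);          /* mirrors the added hunk line */
        return offset > limit ? -1 : 0;  /* enforcement itself unchanged */
    }

    int main(void)
    {
        check_fsize(1UL << 20, 1UL << 16);   /* over-limit write attempt */
        printf("learned RLIMIT_FSIZE candidate: %lu\n", learned_max);
        return 0;
    }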
58129diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58130index 116fd38..c04182da 100644
58131--- a/fs/autofs4/waitq.c
58132+++ b/fs/autofs4/waitq.c
58133@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58134 {
58135 unsigned long sigpipe, flags;
58136 mm_segment_t fs;
58137- const char *data = (const char *)addr;
58138+ const char __user *data = (const char __force_user *)addr;
58139 ssize_t wr = 0;
58140
58141 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58142@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58143 return 1;
58144 }
58145
58146+#ifdef CONFIG_GRKERNSEC_HIDESYM
58147+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58148+#endif
58149+
58150 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58151 enum autofs_notify notify)
58152 {
58153@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58154
58155 /* If this is a direct mount request create a dummy name */
58156 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58157+#ifdef CONFIG_GRKERNSEC_HIDESYM
58158+ /* this name does get written to userland via autofs4_write() */
58159+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58160+#else
58161 qstr.len = sprintf(name, "%p", dentry);
58162+#endif
58163 else {
58164 qstr.len = autofs4_getpath(sbi, dentry, &name);
58165 if (!qstr.len) {
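
The CONFIG_GRKERNSEC_HIDESYM branch above replaces the "%p"-formatted dentry pointer because, as the added comment notes, this name is written to userland via autofs4_write(), and a raw pointer would leak a kernel address. A rough userspace illustration of the substitution (make_wait_name and its arguments are invented for the sketch):

#include <stdio.h>

static unsigned int dummy_name_id; /* models autofs_dummy_name_id */

/* Build the wait-queue name: an opaque counter when kernel symbols
 * must stay hidden, the raw pointer otherwise (an address leak). */
static int make_wait_name(char *buf, const void *dentry, int hidesym)
{
	if (hidesym)
		return sprintf(buf, "%08x", ++dummy_name_id);
	return sprintf(buf, "%p", dentry);
}

int main(void)
{
	char name[32];
	int len = make_wait_name(name, name, 1);
	printf("%.*s\n", len, name);
	return 0;
}
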
58166diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58167index 2722387..56059b5 100644
58168--- a/fs/befs/endian.h
58169+++ b/fs/befs/endian.h
58170@@ -11,7 +11,7 @@
58171
58172 #include <asm/byteorder.h>
58173
58174-static inline u64
58175+static inline u64 __intentional_overflow(-1)
58176 fs64_to_cpu(const struct super_block *sb, fs64 n)
58177 {
58178 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58179@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58180 return (__force fs64)cpu_to_be64(n);
58181 }
58182
58183-static inline u32
58184+static inline u32 __intentional_overflow(-1)
58185 fs32_to_cpu(const struct super_block *sb, fs32 n)
58186 {
58187 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58188@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58189 return (__force fs32)cpu_to_be32(n);
58190 }
58191
58192-static inline u16
58193+static inline u16 __intentional_overflow(-1)
58194 fs16_to_cpu(const struct super_block *sb, fs16 n)
58195 {
58196 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
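
The __intentional_overflow(-1) annotations above address grsecurity's size_overflow GCC plugin, which instruments integer arithmetic and reports unexpected overflows; endianness helpers only reinterpret bit patterns, so any apparent overflow in them is deliberate and must be whitelisted. A sketch of how such a macro is plausibly wired up (an assumption about the mechanism, not a copy of the real compiler header):

/* When the plugin is loaded, the attribute exempts the function from
 * overflow instrumentation; otherwise the macro expands to nothing.
 * SIZE_OVERFLOW_PLUGIN is assumed here as the enabling define. */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

static inline unsigned long long __intentional_overflow(-1)
bswap64(unsigned long long n)
{
	return __builtin_bswap64(n); /* bit reinterpretation, no arithmetic */
}

int main(void)
{
	return bswap64(1ULL) == 0x0100000000000000ULL ? 0 : 1;
}
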
58197diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58198index 4c55668..eeae150 100644
58199--- a/fs/binfmt_aout.c
58200+++ b/fs/binfmt_aout.c
58201@@ -16,6 +16,7 @@
58202 #include <linux/string.h>
58203 #include <linux/fs.h>
58204 #include <linux/file.h>
58205+#include <linux/security.h>
58206 #include <linux/stat.h>
58207 #include <linux/fcntl.h>
58208 #include <linux/ptrace.h>
58209@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58210 #endif
58211 # define START_STACK(u) ((void __user *)u.start_stack)
58212
58213+ memset(&dump, 0, sizeof(dump));
58214+
58215 fs = get_fs();
58216 set_fs(KERNEL_DS);
58217 has_dumped = 1;
58218@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58219
58220 /* If the size of the dump file exceeds the rlimit, then see what would happen
58221 if we wrote the stack, but not the data area. */
58222+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58223 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58224 dump.u_dsize = 0;
58225
58226 /* Make sure we have enough room to write the stack and data areas. */
58227+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58228 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58229 dump.u_ssize = 0;
58230
58231@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58232 rlim = rlimit(RLIMIT_DATA);
58233 if (rlim >= RLIM_INFINITY)
58234 rlim = ~0;
58235+
58236+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58237 if (ex.a_data + ex.a_bss > rlim)
58238 return -ENOMEM;
58239
58240@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58241
58242 install_exec_creds(bprm);
58243
58244+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58245+ current->mm->pax_flags = 0UL;
58246+#endif
58247+
58248+#ifdef CONFIG_PAX_PAGEEXEC
58249+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58250+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58251+
58252+#ifdef CONFIG_PAX_EMUTRAMP
58253+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58254+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58255+#endif
58256+
58257+#ifdef CONFIG_PAX_MPROTECT
58258+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58259+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58260+#endif
58261+
58262+ }
58263+#endif
58264+
58265 if (N_MAGIC(ex) == OMAGIC) {
58266 unsigned long text_addr, map_size;
58267 loff_t pos;
58268@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58269 return error;
58270
58271 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58272- PROT_READ | PROT_WRITE | PROT_EXEC,
58273+ PROT_READ | PROT_WRITE,
58274 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58275 fd_offset + ex.a_text);
58276 if (error != N_DATADDR(ex))
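
Alongside the gr_learn_resource() hooks and the per-binary PaX flag setup, the final hunk above drops PROT_EXEC from the a.out data-segment mapping: writable memory should not also be executable (W^X). A self-contained userspace illustration of the before/after protections (plain mmap, not the kernel mapping path):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* before the patch: PROT_READ | PROT_WRITE | PROT_EXEC  (W+X)
	 * after the patch:  PROT_READ | PROT_WRITE  (data stays non-exec) */
	void *data = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (data == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("writable, non-executable data page at %p\n", data);
	munmap(data, 4096);
	return 0;
}
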
58277diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58278index 995986b..dcc4ef2 100644
58279--- a/fs/binfmt_elf.c
58280+++ b/fs/binfmt_elf.c
58281@@ -34,6 +34,7 @@
58282 #include <linux/utsname.h>
58283 #include <linux/coredump.h>
58284 #include <linux/sched.h>
58285+#include <linux/xattr.h>
58286 #include <asm/uaccess.h>
58287 #include <asm/param.h>
58288 #include <asm/page.h>
58289@@ -47,7 +48,7 @@
58290
58291 static int load_elf_binary(struct linux_binprm *bprm);
58292 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58293- int, int, unsigned long);
58294+ int, int, unsigned long) __intentional_overflow(-1);
58295
58296 #ifdef CONFIG_USELIB
58297 static int load_elf_library(struct file *);
58298@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58299 #define elf_core_dump NULL
58300 #endif
58301
58302+#ifdef CONFIG_PAX_MPROTECT
58303+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58304+#endif
58305+
58306+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58307+static void elf_handle_mmap(struct file *file);
58308+#endif
58309+
58310 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58311 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58312 #else
58313@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58314 .load_binary = load_elf_binary,
58315 .load_shlib = load_elf_library,
58316 .core_dump = elf_core_dump,
58317+
58318+#ifdef CONFIG_PAX_MPROTECT
58319+ .handle_mprotect= elf_handle_mprotect,
58320+#endif
58321+
58322+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58323+ .handle_mmap = elf_handle_mmap,
58324+#endif
58325+
58326 .min_coredump = ELF_EXEC_PAGESIZE,
58327 };
58328
58329@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58330
58331 static int set_brk(unsigned long start, unsigned long end)
58332 {
58333+ unsigned long e = end;
58334+
58335 start = ELF_PAGEALIGN(start);
58336 end = ELF_PAGEALIGN(end);
58337 if (end > start) {
58338@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58339 if (BAD_ADDR(addr))
58340 return addr;
58341 }
58342- current->mm->start_brk = current->mm->brk = end;
58343+ current->mm->start_brk = current->mm->brk = e;
58344 return 0;
58345 }
58346
58347@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58348 elf_addr_t __user *u_rand_bytes;
58349 const char *k_platform = ELF_PLATFORM;
58350 const char *k_base_platform = ELF_BASE_PLATFORM;
58351- unsigned char k_rand_bytes[16];
58352+ u32 k_rand_bytes[4];
58353 int items;
58354 elf_addr_t *elf_info;
58355 int ei_index = 0;
58356 const struct cred *cred = current_cred();
58357 struct vm_area_struct *vma;
58358+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58359
58360 /*
58361 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58362@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58363 * Generate 16 random bytes for userspace PRNG seeding.
58364 */
58365 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58366- u_rand_bytes = (elf_addr_t __user *)
58367- STACK_ALLOC(p, sizeof(k_rand_bytes));
58368+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58369+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58370+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58371+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58372+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58373+ u_rand_bytes = (elf_addr_t __user *) p;
58374 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58375 return -EFAULT;
58376
58377@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58378 return -EFAULT;
58379 current->mm->env_end = p;
58380
58381+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58382+
58383 /* Put the elf_info on the stack in the right place. */
58384 sp = (elf_addr_t __user *)envp + 1;
58385- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58386+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58387 return -EFAULT;
58388 return 0;
58389 }
58390@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58391 an ELF header */
58392
58393 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58394- struct file *interpreter, unsigned long *interp_map_addr,
58395+ struct file *interpreter,
58396 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58397 {
58398 struct elf_phdr *eppnt;
58399- unsigned long load_addr = 0;
58400+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58401 int load_addr_set = 0;
58402 unsigned long last_bss = 0, elf_bss = 0;
58403- unsigned long error = ~0UL;
58404+ unsigned long error = -EINVAL;
58405 unsigned long total_size;
58406 int i;
58407
58408@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58409 goto out;
58410 }
58411
58412+#ifdef CONFIG_PAX_SEGMEXEC
58413+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58414+ pax_task_size = SEGMEXEC_TASK_SIZE;
58415+#endif
58416+
58417 eppnt = interp_elf_phdata;
58418 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58419 if (eppnt->p_type == PT_LOAD) {
58420@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58421 map_addr = elf_map(interpreter, load_addr + vaddr,
58422 eppnt, elf_prot, elf_type, total_size);
58423 total_size = 0;
58424- if (!*interp_map_addr)
58425- *interp_map_addr = map_addr;
58426 error = map_addr;
58427 if (BAD_ADDR(map_addr))
58428 goto out;
58429@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58430 k = load_addr + eppnt->p_vaddr;
58431 if (BAD_ADDR(k) ||
58432 eppnt->p_filesz > eppnt->p_memsz ||
58433- eppnt->p_memsz > TASK_SIZE ||
58434- TASK_SIZE - eppnt->p_memsz < k) {
58435+ eppnt->p_memsz > pax_task_size ||
58436+ pax_task_size - eppnt->p_memsz < k) {
58437 error = -ENOMEM;
58438 goto out;
58439 }
58440@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58441 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58442
58443 /* Map the last of the bss segment */
58444- error = vm_brk(elf_bss, last_bss - elf_bss);
58445- if (BAD_ADDR(error))
58446- goto out;
58447+ if (last_bss > elf_bss) {
58448+ error = vm_brk(elf_bss, last_bss - elf_bss);
58449+ if (BAD_ADDR(error))
58450+ goto out;
58451+ }
58452 }
58453
58454 error = load_addr;
58455@@ -634,6 +666,336 @@ out:
58456 return error;
58457 }
58458
58459+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58460+#ifdef CONFIG_PAX_SOFTMODE
58461+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58462+{
58463+ unsigned long pax_flags = 0UL;
58464+
58465+#ifdef CONFIG_PAX_PAGEEXEC
58466+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58467+ pax_flags |= MF_PAX_PAGEEXEC;
58468+#endif
58469+
58470+#ifdef CONFIG_PAX_SEGMEXEC
58471+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58472+ pax_flags |= MF_PAX_SEGMEXEC;
58473+#endif
58474+
58475+#ifdef CONFIG_PAX_EMUTRAMP
58476+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58477+ pax_flags |= MF_PAX_EMUTRAMP;
58478+#endif
58479+
58480+#ifdef CONFIG_PAX_MPROTECT
58481+ if (elf_phdata->p_flags & PF_MPROTECT)
58482+ pax_flags |= MF_PAX_MPROTECT;
58483+#endif
58484+
58485+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58486+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58487+ pax_flags |= MF_PAX_RANDMMAP;
58488+#endif
58489+
58490+ return pax_flags;
58491+}
58492+#endif
58493+
58494+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58495+{
58496+ unsigned long pax_flags = 0UL;
58497+
58498+#ifdef CONFIG_PAX_PAGEEXEC
58499+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58500+ pax_flags |= MF_PAX_PAGEEXEC;
58501+#endif
58502+
58503+#ifdef CONFIG_PAX_SEGMEXEC
58504+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58505+ pax_flags |= MF_PAX_SEGMEXEC;
58506+#endif
58507+
58508+#ifdef CONFIG_PAX_EMUTRAMP
58509+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58510+ pax_flags |= MF_PAX_EMUTRAMP;
58511+#endif
58512+
58513+#ifdef CONFIG_PAX_MPROTECT
58514+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58515+ pax_flags |= MF_PAX_MPROTECT;
58516+#endif
58517+
58518+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58519+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58520+ pax_flags |= MF_PAX_RANDMMAP;
58521+#endif
58522+
58523+ return pax_flags;
58524+}
58525+#endif
58526+
58527+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58528+#ifdef CONFIG_PAX_SOFTMODE
58529+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58530+{
58531+ unsigned long pax_flags = 0UL;
58532+
58533+#ifdef CONFIG_PAX_PAGEEXEC
58534+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58535+ pax_flags |= MF_PAX_PAGEEXEC;
58536+#endif
58537+
58538+#ifdef CONFIG_PAX_SEGMEXEC
58539+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58540+ pax_flags |= MF_PAX_SEGMEXEC;
58541+#endif
58542+
58543+#ifdef CONFIG_PAX_EMUTRAMP
58544+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58545+ pax_flags |= MF_PAX_EMUTRAMP;
58546+#endif
58547+
58548+#ifdef CONFIG_PAX_MPROTECT
58549+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58550+ pax_flags |= MF_PAX_MPROTECT;
58551+#endif
58552+
58553+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58554+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58555+ pax_flags |= MF_PAX_RANDMMAP;
58556+#endif
58557+
58558+ return pax_flags;
58559+}
58560+#endif
58561+
58562+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58563+{
58564+ unsigned long pax_flags = 0UL;
58565+
58566+#ifdef CONFIG_PAX_PAGEEXEC
58567+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58568+ pax_flags |= MF_PAX_PAGEEXEC;
58569+#endif
58570+
58571+#ifdef CONFIG_PAX_SEGMEXEC
58572+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58573+ pax_flags |= MF_PAX_SEGMEXEC;
58574+#endif
58575+
58576+#ifdef CONFIG_PAX_EMUTRAMP
58577+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58578+ pax_flags |= MF_PAX_EMUTRAMP;
58579+#endif
58580+
58581+#ifdef CONFIG_PAX_MPROTECT
58582+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58583+ pax_flags |= MF_PAX_MPROTECT;
58584+#endif
58585+
58586+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58587+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58588+ pax_flags |= MF_PAX_RANDMMAP;
58589+#endif
58590+
58591+ return pax_flags;
58592+}
58593+#endif
58594+
58595+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58596+static unsigned long pax_parse_defaults(void)
58597+{
58598+ unsigned long pax_flags = 0UL;
58599+
58600+#ifdef CONFIG_PAX_SOFTMODE
58601+ if (pax_softmode)
58602+ return pax_flags;
58603+#endif
58604+
58605+#ifdef CONFIG_PAX_PAGEEXEC
58606+ pax_flags |= MF_PAX_PAGEEXEC;
58607+#endif
58608+
58609+#ifdef CONFIG_PAX_SEGMEXEC
58610+ pax_flags |= MF_PAX_SEGMEXEC;
58611+#endif
58612+
58613+#ifdef CONFIG_PAX_MPROTECT
58614+ pax_flags |= MF_PAX_MPROTECT;
58615+#endif
58616+
58617+#ifdef CONFIG_PAX_RANDMMAP
58618+ if (randomize_va_space)
58619+ pax_flags |= MF_PAX_RANDMMAP;
58620+#endif
58621+
58622+ return pax_flags;
58623+}
58624+
58625+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58626+{
58627+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58628+
58629+#ifdef CONFIG_PAX_EI_PAX
58630+
58631+#ifdef CONFIG_PAX_SOFTMODE
58632+ if (pax_softmode)
58633+ return pax_flags;
58634+#endif
58635+
58636+ pax_flags = 0UL;
58637+
58638+#ifdef CONFIG_PAX_PAGEEXEC
58639+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58640+ pax_flags |= MF_PAX_PAGEEXEC;
58641+#endif
58642+
58643+#ifdef CONFIG_PAX_SEGMEXEC
58644+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58645+ pax_flags |= MF_PAX_SEGMEXEC;
58646+#endif
58647+
58648+#ifdef CONFIG_PAX_EMUTRAMP
58649+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58650+ pax_flags |= MF_PAX_EMUTRAMP;
58651+#endif
58652+
58653+#ifdef CONFIG_PAX_MPROTECT
58654+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58655+ pax_flags |= MF_PAX_MPROTECT;
58656+#endif
58657+
58658+#ifdef CONFIG_PAX_ASLR
58659+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58660+ pax_flags |= MF_PAX_RANDMMAP;
58661+#endif
58662+
58663+#endif
58664+
58665+ return pax_flags;
58666+
58667+}
58668+
58669+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58670+{
58671+
58672+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58673+ unsigned long i;
58674+
58675+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58676+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58677+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58678+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58679+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58680+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58681+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58682+ return PAX_PARSE_FLAGS_FALLBACK;
58683+
58684+#ifdef CONFIG_PAX_SOFTMODE
58685+ if (pax_softmode)
58686+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58687+ else
58688+#endif
58689+
58690+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58691+ break;
58692+ }
58693+#endif
58694+
58695+ return PAX_PARSE_FLAGS_FALLBACK;
58696+}
58697+
58698+static unsigned long pax_parse_xattr_pax(struct file * const file)
58699+{
58700+
58701+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58702+ ssize_t xattr_size, i;
58703+ unsigned char xattr_value[sizeof("pemrs") - 1];
58704+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58705+
58706+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58707+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58708+ return PAX_PARSE_FLAGS_FALLBACK;
58709+
58710+ for (i = 0; i < xattr_size; i++)
58711+ switch (xattr_value[i]) {
58712+ default:
58713+ return PAX_PARSE_FLAGS_FALLBACK;
58714+
58715+#define parse_flag(option1, option2, flag) \
58716+ case option1: \
58717+ if (pax_flags_hardmode & MF_PAX_##flag) \
58718+ return PAX_PARSE_FLAGS_FALLBACK;\
58719+ pax_flags_hardmode |= MF_PAX_##flag; \
58720+ break; \
58721+ case option2: \
58722+ if (pax_flags_softmode & MF_PAX_##flag) \
58723+ return PAX_PARSE_FLAGS_FALLBACK;\
58724+ pax_flags_softmode |= MF_PAX_##flag; \
58725+ break;
58726+
58727+ parse_flag('p', 'P', PAGEEXEC);
58728+ parse_flag('e', 'E', EMUTRAMP);
58729+ parse_flag('m', 'M', MPROTECT);
58730+ parse_flag('r', 'R', RANDMMAP);
58731+ parse_flag('s', 'S', SEGMEXEC);
58732+
58733+#undef parse_flag
58734+ }
58735+
58736+ if (pax_flags_hardmode & pax_flags_softmode)
58737+ return PAX_PARSE_FLAGS_FALLBACK;
58738+
58739+#ifdef CONFIG_PAX_SOFTMODE
58740+ if (pax_softmode)
58741+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58742+ else
58743+#endif
58744+
58745+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58746+#else
58747+ return PAX_PARSE_FLAGS_FALLBACK;
58748+#endif
58749+
58750+}
58751+
58752+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58753+{
58754+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58755+
58756+ pax_flags = pax_parse_defaults();
58757+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58758+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58759+ xattr_pax_flags = pax_parse_xattr_pax(file);
58760+
58761+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58762+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58763+ pt_pax_flags != xattr_pax_flags)
58764+ return -EINVAL;
58765+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58766+ pax_flags = xattr_pax_flags;
58767+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58768+ pax_flags = pt_pax_flags;
58769+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58770+ pax_flags = ei_pax_flags;
58771+
58772+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58773+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58774+ if ((__supported_pte_mask & _PAGE_NX))
58775+ pax_flags &= ~MF_PAX_SEGMEXEC;
58776+ else
58777+ pax_flags &= ~MF_PAX_PAGEEXEC;
58778+ }
58779+#endif
58780+
58781+ if (0 > pax_check_flags(&pax_flags))
58782+ return -EINVAL;
58783+
58784+ current->mm->pax_flags = pax_flags;
58785+ return 0;
58786+}
58787+#endif
58788+
58789 /*
58790 * These are the functions used to load ELF style executables and shared
58791 * libraries. There is no binary dependent code anywhere else.
58792@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58793 {
58794 unsigned long random_variable = 0;
58795
58796+#ifdef CONFIG_PAX_RANDUSTACK
58797+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58798+ return stack_top - current->mm->delta_stack;
58799+#endif
58800+
58801 if ((current->flags & PF_RANDOMIZE) &&
58802 !(current->personality & ADDR_NO_RANDOMIZE)) {
58803 random_variable = (unsigned long) get_random_int();
58804@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58805 unsigned long load_addr = 0, load_bias = 0;
58806 int load_addr_set = 0;
58807 char * elf_interpreter = NULL;
58808- unsigned long error;
58809+ unsigned long error = 0;
58810 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58811 unsigned long elf_bss, elf_brk;
58812 int retval, i;
58813@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58814 struct elfhdr interp_elf_ex;
58815 } *loc;
58816 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58817+ unsigned long pax_task_size;
58818
58819 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58820 if (!loc) {
58821@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58822 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58823 may depend on the personality. */
58824 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58825+
58826+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58827+ current->mm->pax_flags = 0UL;
58828+#endif
58829+
58830+#ifdef CONFIG_PAX_DLRESOLVE
58831+ current->mm->call_dl_resolve = 0UL;
58832+#endif
58833+
58834+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58835+ current->mm->call_syscall = 0UL;
58836+#endif
58837+
58838+#ifdef CONFIG_PAX_ASLR
58839+ current->mm->delta_mmap = 0UL;
58840+ current->mm->delta_stack = 0UL;
58841+#endif
58842+
58843+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58844+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58845+ send_sig(SIGKILL, current, 0);
58846+ goto out_free_dentry;
58847+ }
58848+#endif
58849+
58850+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58851+ pax_set_initial_flags(bprm);
58852+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58853+ if (pax_set_initial_flags_func)
58854+ (pax_set_initial_flags_func)(bprm);
58855+#endif
58856+
58857+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58858+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58859+ current->mm->context.user_cs_limit = PAGE_SIZE;
58860+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58861+ }
58862+#endif
58863+
58864+#ifdef CONFIG_PAX_SEGMEXEC
58865+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58866+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58867+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58868+ pax_task_size = SEGMEXEC_TASK_SIZE;
58869+ current->mm->def_flags |= VM_NOHUGEPAGE;
58870+ } else
58871+#endif
58872+
58873+ pax_task_size = TASK_SIZE;
58874+
58875+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58876+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58877+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58878+ put_cpu();
58879+ }
58880+#endif
58881+
58882+#ifdef CONFIG_PAX_ASLR
58883+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58884+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58885+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58886+ }
58887+#endif
58888+
58889+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58890+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58891+ executable_stack = EXSTACK_DISABLE_X;
58892+ current->personality &= ~READ_IMPLIES_EXEC;
58893+ } else
58894+#endif
58895+
58896 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58897 current->personality |= READ_IMPLIES_EXEC;
58898
58899@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58900 #else
58901 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58902 #endif
58903+
58904+#ifdef CONFIG_PAX_RANDMMAP
58905+ /* PaX: randomize base address at the default exe base if requested */
58906+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58907+#ifdef CONFIG_SPARC64
58908+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58909+#else
58910+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58911+#endif
58912+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58913+ elf_flags |= MAP_FIXED;
58914+ }
58915+#endif
58916+
58917 }
58918
58919 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58920@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58921 * allowed task size. Note that p_filesz must always be
58922 * <= p_memsz so it is only necessary to check p_memsz.
58923 */
58924- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58925- elf_ppnt->p_memsz > TASK_SIZE ||
58926- TASK_SIZE - elf_ppnt->p_memsz < k) {
58927+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58928+ elf_ppnt->p_memsz > pax_task_size ||
58929+ pax_task_size - elf_ppnt->p_memsz < k) {
58930 /* set_brk can never work. Avoid overflows. */
58931 retval = -EINVAL;
58932 goto out_free_dentry;
58933@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
58934 if (retval)
58935 goto out_free_dentry;
58936 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
58937- retval = -EFAULT; /* Nobody gets to see this, but.. */
58938- goto out_free_dentry;
58939+ /*
58940+ * This bss-zeroing can fail if the ELF
58941+ * file specifies odd protections. So
58942+ * we don't check the return value
58943+ */
58944 }
58945
58946+#ifdef CONFIG_PAX_RANDMMAP
58947+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58948+ unsigned long start, size, flags;
58949+ vm_flags_t vm_flags;
58950+
58951+ start = ELF_PAGEALIGN(elf_brk);
58952+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
58953+ flags = MAP_FIXED | MAP_PRIVATE;
58954+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
58955+
58956+ down_write(&current->mm->mmap_sem);
58957+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
58958+ retval = -ENOMEM;
58959+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
58960+// if (current->personality & ADDR_NO_RANDOMIZE)
58961+// vm_flags |= VM_READ | VM_MAYREAD;
58962+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
58963+ retval = IS_ERR_VALUE(start) ? start : 0;
58964+ }
58965+ up_write(&current->mm->mmap_sem);
58966+ if (retval == 0)
58967+ retval = set_brk(start + size, start + size + PAGE_SIZE);
58968+ if (retval < 0)
58969+ goto out_free_dentry;
58970+ }
58971+#endif
58972+
58973 if (elf_interpreter) {
58974- unsigned long interp_map_addr = 0;
58975-
58976 elf_entry = load_elf_interp(&loc->interp_elf_ex,
58977 interpreter,
58978- &interp_map_addr,
58979 load_bias, interp_elf_phdata);
58980 if (!IS_ERR((void *)elf_entry)) {
58981 /*
58982@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
58983 * Decide what to dump of a segment, part, all or none.
58984 */
58985 static unsigned long vma_dump_size(struct vm_area_struct *vma,
58986- unsigned long mm_flags)
58987+ unsigned long mm_flags, long signr)
58988 {
58989 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
58990
58991@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58992 if (vma->vm_file == NULL)
58993 return 0;
58994
58995- if (FILTER(MAPPED_PRIVATE))
58996+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
58997 goto whole;
58998
58999 /*
59000@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59001 {
59002 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59003 int i = 0;
59004- do
59005+ do {
59006 i += 2;
59007- while (auxv[i - 2] != AT_NULL);
59008+ } while (auxv[i - 2] != AT_NULL);
59009 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59010 }
59011
59012@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59013 {
59014 mm_segment_t old_fs = get_fs();
59015 set_fs(KERNEL_DS);
59016- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59017+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59018 set_fs(old_fs);
59019 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59020 }
59021@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59022 vma = next_vma(vma, gate_vma)) {
59023 unsigned long dump_size;
59024
59025- dump_size = vma_dump_size(vma, cprm->mm_flags);
59026+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59027 vma_filesz[i++] = dump_size;
59028 vma_data_size += dump_size;
59029 }
59030@@ -2314,6 +2794,167 @@ out:
59031
59032 #endif /* CONFIG_ELF_CORE */
59033
59034+#ifdef CONFIG_PAX_MPROTECT
59035+/* PaX: non-PIC ELF libraries need relocations on their executable segments
59036+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59037+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59038+ *
59039+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59040+ * basis because we want to allow the common case and not the special ones.
59041+ */
59042+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59043+{
59044+ struct elfhdr elf_h;
59045+ struct elf_phdr elf_p;
59046+ unsigned long i;
59047+ unsigned long oldflags;
59048+ bool is_textrel_rw, is_textrel_rx, is_relro;
59049+
59050+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59051+ return;
59052+
59053+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59054+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59055+
59056+#ifdef CONFIG_PAX_ELFRELOCS
59057+ /* possible TEXTREL */
59058+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59059+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59060+#else
59061+ is_textrel_rw = false;
59062+ is_textrel_rx = false;
59063+#endif
59064+
59065+ /* possible RELRO */
59066+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59067+
59068+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59069+ return;
59070+
59071+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59072+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59073+
59074+#ifdef CONFIG_PAX_ETEXECRELOCS
59075+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59076+#else
59077+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59078+#endif
59079+
59080+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59081+ !elf_check_arch(&elf_h) ||
59082+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59083+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59084+ return;
59085+
59086+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59087+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59088+ return;
59089+ switch (elf_p.p_type) {
59090+ case PT_DYNAMIC:
59091+ if (!is_textrel_rw && !is_textrel_rx)
59092+ continue;
59093+ i = 0UL;
59094+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59095+ elf_dyn dyn;
59096+
59097+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59098+ break;
59099+ if (dyn.d_tag == DT_NULL)
59100+ break;
59101+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59102+ gr_log_textrel(vma);
59103+ if (is_textrel_rw)
59104+ vma->vm_flags |= VM_MAYWRITE;
59105+ else
59106+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
59107+ vma->vm_flags &= ~VM_MAYWRITE;
59108+ break;
59109+ }
59110+ i++;
59111+ }
59112+ is_textrel_rw = false;
59113+ is_textrel_rx = false;
59114+ continue;
59115+
59116+ case PT_GNU_RELRO:
59117+ if (!is_relro)
59118+ continue;
59119+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59120+ vma->vm_flags &= ~VM_MAYWRITE;
59121+ is_relro = false;
59122+ continue;
59123+
59124+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59125+ case PT_PAX_FLAGS: {
59126+ const char *msg_mprotect = "", *msg_emutramp = "";
59127+ char *buffer_lib, *buffer_exe;
59128+
59129+ if (elf_p.p_flags & PF_NOMPROTECT)
59130+ msg_mprotect = "MPROTECT disabled";
59131+
59132+#ifdef CONFIG_PAX_EMUTRAMP
59133+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59134+ msg_emutramp = "EMUTRAMP enabled";
59135+#endif
59136+
59137+ if (!msg_mprotect[0] && !msg_emutramp[0])
59138+ continue;
59139+
59140+ if (!printk_ratelimit())
59141+ continue;
59142+
59143+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59144+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59145+ if (buffer_lib && buffer_exe) {
59146+ char *path_lib, *path_exe;
59147+
59148+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59149+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59150+
59151+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59152+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59153+
59154+ }
59155+ free_page((unsigned long)buffer_exe);
59156+ free_page((unsigned long)buffer_lib);
59157+ continue;
59158+ }
59159+#endif
59160+
59161+ }
59162+ }
59163+}
59164+#endif
59165+
59166+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59167+
59168+extern int grsec_enable_log_rwxmaps;
59169+
59170+static void elf_handle_mmap(struct file *file)
59171+{
59172+ struct elfhdr elf_h;
59173+ struct elf_phdr elf_p;
59174+ unsigned long i;
59175+
59176+ if (!grsec_enable_log_rwxmaps)
59177+ return;
59178+
59179+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59180+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59181+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59182+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59183+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59184+ return;
59185+
59186+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59187+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59188+ return;
59189+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59190+ gr_log_ptgnustack(file);
59191+ }
59192+}
59193+#endif
59194+
59195 static int __init init_elf_binfmt(void)
59196 {
59197 register_binfmt(&elf_format);
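
Of the binfmt_elf.c additions above, the core policy decision sits in pax_parse_pax_flags(): four sources are combined with a fixed precedence (built-in defaults, then legacy EI_PAX header bits, then a PT_PAX_FLAGS program header, then the PaX filesystem xattr), and an explicit disagreement between PT_PAX_FLAGS and the xattr is rejected with -EINVAL. A compact userspace model of that resolution order (FALLBACK stands in for PAX_PARSE_FLAGS_FALLBACK, meaning the source was absent or unusable):

#include <stdio.h>

#define FALLBACK (~0UL) /* models PAX_PARSE_FLAGS_FALLBACK */

/* Later sources override earlier ones; two explicit but conflicting
 * markings (program header vs. xattr) are a hard error. */
static int resolve(unsigned long defaults, unsigned long ei_pax,
		   unsigned long pt_pax, unsigned long xattr,
		   unsigned long *out)
{
	if (pt_pax != FALLBACK && xattr != FALLBACK && pt_pax != xattr)
		return -1;
	if (xattr != FALLBACK)
		*out = xattr;
	else if (pt_pax != FALLBACK)
		*out = pt_pax;
	else if (ei_pax != FALLBACK)
		*out = ei_pax;
	else
		*out = defaults;
	return 0;
}

int main(void)
{
	unsigned long flags;
	if (resolve(0x7UL, FALLBACK, 0x3UL, FALLBACK, &flags) == 0)
		printf("effective flags: %#lx\n", flags); /* program header wins */
	return 0;
}
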
59198diff --git a/fs/block_dev.c b/fs/block_dev.c
59199index b48c41b..e070416 100644
59200--- a/fs/block_dev.c
59201+++ b/fs/block_dev.c
59202@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59203 else if (bdev->bd_contains == bdev)
59204 return true; /* is a whole device which isn't held */
59205
59206- else if (whole->bd_holder == bd_may_claim)
59207+ else if (whole->bd_holder == (void *)bd_may_claim)
59208 return true; /* is a partition of a device that is being partitioned */
59209 else if (whole->bd_holder != NULL)
59210 return false; /* is a partition of a held device */
59211diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59212index f54511d..58acdec 100644
59213--- a/fs/btrfs/ctree.c
59214+++ b/fs/btrfs/ctree.c
59215@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59216 free_extent_buffer(buf);
59217 add_root_to_dirty_list(root);
59218 } else {
59219- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59220- parent_start = parent->start;
59221- else
59222+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59223+ if (parent)
59224+ parent_start = parent->start;
59225+ else
59226+ parent_start = 0;
59227+ } else
59228 parent_start = 0;
59229
59230 WARN_ON(trans->transid != btrfs_header_generation(parent));
59231diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59232index de4e70f..b41dc45 100644
59233--- a/fs/btrfs/delayed-inode.c
59234+++ b/fs/btrfs/delayed-inode.c
59235@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59236
59237 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59238 {
59239- int seq = atomic_inc_return(&delayed_root->items_seq);
59240+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59241 if ((atomic_dec_return(&delayed_root->items) <
59242 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59243 waitqueue_active(&delayed_root->wait))
59244@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59245
59246 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59247 {
59248- int val = atomic_read(&delayed_root->items_seq);
59249+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59250
59251 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59252 return 1;
59253@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59254 int seq;
59255 int ret;
59256
59257- seq = atomic_read(&delayed_root->items_seq);
59258+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59259
59260 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59261 if (ret)
59262diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59263index f70119f..ab5894d 100644
59264--- a/fs/btrfs/delayed-inode.h
59265+++ b/fs/btrfs/delayed-inode.h
59266@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59267 */
59268 struct list_head prepare_list;
59269 atomic_t items; /* for delayed items */
59270- atomic_t items_seq; /* for delayed items */
59271+ atomic_unchecked_t items_seq; /* for delayed items */
59272 int nodes; /* for delayed nodes */
59273 wait_queue_head_t wait;
59274 };
59275@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59276 struct btrfs_delayed_root *delayed_root)
59277 {
59278 atomic_set(&delayed_root->items, 0);
59279- atomic_set(&delayed_root->items_seq, 0);
59280+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59281 delayed_root->nodes = 0;
59282 spin_lock_init(&delayed_root->lock);
59283 init_waitqueue_head(&delayed_root->wait);
59284diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59285index 6f49b28..483410f 100644
59286--- a/fs/btrfs/super.c
59287+++ b/fs/btrfs/super.c
59288@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59289 function, line, errstr);
59290 return;
59291 }
59292- ACCESS_ONCE(trans->transaction->aborted) = errno;
59293+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59294 /* Wake up anybody who may be waiting on this transaction */
59295 wake_up(&root->fs_info->transaction_wait);
59296 wake_up(&root->fs_info->transaction_blocked_wait);
59297diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59298index 92db3f6..898a561 100644
59299--- a/fs/btrfs/sysfs.c
59300+++ b/fs/btrfs/sysfs.c
59301@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59302 for (set = 0; set < FEAT_MAX; set++) {
59303 int i;
59304 struct attribute *attrs[2];
59305- struct attribute_group agroup = {
59306+ attribute_group_no_const agroup = {
59307 .name = "features",
59308 .attrs = attrs,
59309 };
59310diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59311index 2299bfd..4098e72 100644
59312--- a/fs/btrfs/tests/free-space-tests.c
59313+++ b/fs/btrfs/tests/free-space-tests.c
59314@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59315 * extent entry.
59316 */
59317 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59318- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59319+ pax_open_kernel();
59320+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59321+ pax_close_kernel();
59322
59323 /*
59324 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59325@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59326 if (ret)
59327 return ret;
59328
59329- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59330+ pax_open_kernel();
59331+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59332+ pax_close_kernel();
59333 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59334
59335 return 0;
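
The pax_open_kernel()/pax_close_kernel() bracket above is needed because grsecurity constifies ops tables such as free_space_ctl->op, leaving them in read-only memory; the test must briefly lift write protection and assign through a void ** cast to swap the method in. A userspace analogy using mprotect on a page that holds a function-pointer table (illustrative only; the kernel primitives toggle protection differently):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { int (*use_bitmap)(int); };

static int real_impl(int x) { return x; }
static int test_impl(int x) { return -x; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct ops *op = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (op == MAP_FAILED)
		return 1;
	op->use_bitmap = real_impl;
	mprotect(op, pagesz, PROT_READ);              /* table becomes RO   */

	mprotect(op, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	op->use_bitmap = test_impl;                   /* patch the method   */
	mprotect(op, pagesz, PROT_READ);              /* pax_close_kernel() */

	printf("%d\n", op->use_bitmap(21));           /* -21, the override  */
	munmap(op, pagesz);
	return 0;
}
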
59336diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59337index 154990c..d0cf699 100644
59338--- a/fs/btrfs/tree-log.h
59339+++ b/fs/btrfs/tree-log.h
59340@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59341 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59342 struct btrfs_trans_handle *trans)
59343 {
59344- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59345+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59346 }
59347
59348 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59349diff --git a/fs/buffer.c b/fs/buffer.c
59350index 20805db..2e8fc69 100644
59351--- a/fs/buffer.c
59352+++ b/fs/buffer.c
59353@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59354 bh_cachep = kmem_cache_create("buffer_head",
59355 sizeof(struct buffer_head), 0,
59356 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59357- SLAB_MEM_SPREAD),
59358+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59359 NULL);
59360
59361 /*
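
The SLAB_NO_SANITIZE flag above interacts with PaX's slab sanitization (CONFIG_PAX_MEMORY_SANITIZE), which wipes objects as they are freed so stale data cannot leak out of the allocator; buffer_head is a hot, high-turnover cache, so it is opted out for performance. A toy model of the trade-off (sanitized_free is invented for the sketch; a real implementation would ensure the wiping store cannot be elided by the optimizer):

#include <stdlib.h>
#include <string.h>

/* Wipe an object before returning it to the allocator, unless the
 * cache opted out (the SLAB_NO_SANITIZE case). */
static void sanitized_free(void *p, size_t n, int no_sanitize)
{
	if (!no_sanitize)
		memset(p, 0, n);
	free(p);
}

int main(void)
{
	void *bh = malloc(128);
	if (!bh)
		return 1;
	sanitized_free(bh, 128, 1); /* buffer_head path: skip the wipe */
	return 0;
}
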
59362diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59363index fbb08e9..0fda764 100644
59364--- a/fs/cachefiles/bind.c
59365+++ b/fs/cachefiles/bind.c
59366@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59367 args);
59368
59369 /* start by checking things over */
59370- ASSERT(cache->fstop_percent >= 0 &&
59371- cache->fstop_percent < cache->fcull_percent &&
59372+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59373 cache->fcull_percent < cache->frun_percent &&
59374 cache->frun_percent < 100);
59375
59376- ASSERT(cache->bstop_percent >= 0 &&
59377- cache->bstop_percent < cache->bcull_percent &&
59378+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59379 cache->bcull_percent < cache->brun_percent &&
59380 cache->brun_percent < 100);
59381
59382diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59383index ce1b115..4a6852c 100644
59384--- a/fs/cachefiles/daemon.c
59385+++ b/fs/cachefiles/daemon.c
59386@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59387 if (n > buflen)
59388 return -EMSGSIZE;
59389
59390- if (copy_to_user(_buffer, buffer, n) != 0)
59391+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59392 return -EFAULT;
59393
59394 return n;
59395@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59396 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59397 return -EIO;
59398
59399- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59400+ if (datalen > PAGE_SIZE - 1)
59401 return -EOPNOTSUPP;
59402
59403 /* drag the command string into the kernel so we can parse it */
59404@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59405 if (args[0] != '%' || args[1] != '\0')
59406 return -EINVAL;
59407
59408- if (fstop < 0 || fstop >= cache->fcull_percent)
59409+ if (fstop >= cache->fcull_percent)
59410 return cachefiles_daemon_range_error(cache, args);
59411
59412 cache->fstop_percent = fstop;
59413@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59414 if (args[0] != '%' || args[1] != '\0')
59415 return -EINVAL;
59416
59417- if (bstop < 0 || bstop >= cache->bcull_percent)
59418+ if (bstop >= cache->bcull_percent)
59419 return cachefiles_daemon_range_error(cache, args);
59420
59421 cache->bstop_percent = bstop;
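
The deletions above (datalen < 0, fstop < 0, bstop < 0) remove comparisons that can never be true: datalen is a size_t and the percentage locals are unsigned, so only the upper-bound tests do any work. The read path also gains an n > sizeof(buffer) guard so a miscomputed length cannot copy past the kernel buffer. A two-branch demonstration of why the negative test is vacuous (compilers flag it with -Wtype-limits for the same reason):

#include <stdio.h>

int main(void)
{
	unsigned long datalen = (unsigned long)-5; /* "negative" input */

	if (datalen < 0)        /* always false for an unsigned type */
		puts("never printed");

	if (datalen > 4096 - 1) /* the surviving bound check catches it */
		puts("rejected: larger than the page-sized buffer");
	return 0;
}
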
59422diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59423index 8c52472..c4e3a69 100644
59424--- a/fs/cachefiles/internal.h
59425+++ b/fs/cachefiles/internal.h
59426@@ -66,7 +66,7 @@ struct cachefiles_cache {
59427 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59428 struct rb_root active_nodes; /* active nodes (can't be culled) */
59429 rwlock_t active_lock; /* lock for active_nodes */
59430- atomic_t gravecounter; /* graveyard uniquifier */
59431+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59432 unsigned frun_percent; /* when to stop culling (% files) */
59433 unsigned fcull_percent; /* when to start culling (% files) */
59434 unsigned fstop_percent; /* when to stop allocating (% files) */
59435@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59436 * proc.c
59437 */
59438 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59439-extern atomic_t cachefiles_lookup_histogram[HZ];
59440-extern atomic_t cachefiles_mkdir_histogram[HZ];
59441-extern atomic_t cachefiles_create_histogram[HZ];
59442+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59443+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59444+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59445
59446 extern int __init cachefiles_proc_init(void);
59447 extern void cachefiles_proc_cleanup(void);
59448 static inline
59449-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59450+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59451 {
59452 unsigned long jif = jiffies - start_jif;
59453 if (jif >= HZ)
59454 jif = HZ - 1;
59455- atomic_inc(&histogram[jif]);
59456+ atomic_inc_unchecked(&histogram[jif]);
59457 }
59458
59459 #else
59460diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59461index 7f8e83f..8951aa4 100644
59462--- a/fs/cachefiles/namei.c
59463+++ b/fs/cachefiles/namei.c
59464@@ -309,7 +309,7 @@ try_again:
59465 /* first step is to make up a grave dentry in the graveyard */
59466 sprintf(nbuffer, "%08x%08x",
59467 (uint32_t) get_seconds(),
59468- (uint32_t) atomic_inc_return(&cache->gravecounter));
59469+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59470
59471 /* do the multiway lock magic */
59472 trap = lock_rename(cache->graveyard, dir);
59473diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59474index eccd339..4c1d995 100644
59475--- a/fs/cachefiles/proc.c
59476+++ b/fs/cachefiles/proc.c
59477@@ -14,9 +14,9 @@
59478 #include <linux/seq_file.h>
59479 #include "internal.h"
59480
59481-atomic_t cachefiles_lookup_histogram[HZ];
59482-atomic_t cachefiles_mkdir_histogram[HZ];
59483-atomic_t cachefiles_create_histogram[HZ];
59484+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59485+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59486+atomic_unchecked_t cachefiles_create_histogram[HZ];
59487
59488 /*
59489 * display the latency histogram
59490@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59491 return 0;
59492 default:
59493 index = (unsigned long) v - 3;
59494- x = atomic_read(&cachefiles_lookup_histogram[index]);
59495- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59496- z = atomic_read(&cachefiles_create_histogram[index]);
59497+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59498+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59499+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59500 if (x == 0 && y == 0 && z == 0)
59501 return 0;
59502
59503diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59504index c241603..56bae60 100644
59505--- a/fs/ceph/dir.c
59506+++ b/fs/ceph/dir.c
59507@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59508 struct dentry *dentry, *last;
59509 struct ceph_dentry_info *di;
59510 int err = 0;
59511+ char d_name[DNAME_INLINE_LEN];
59512+ const unsigned char *name;
59513
59514 /* claim ref on last dentry we returned */
59515 last = fi->dentry;
59516@@ -192,7 +194,12 @@ more:
59517
59518 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59519 dentry, dentry, dentry->d_inode);
59520- if (!dir_emit(ctx, dentry->d_name.name,
59521+ name = dentry->d_name.name;
59522+ if (name == dentry->d_iname) {
59523+ memcpy(d_name, name, dentry->d_name.len);
59524+ name = d_name;
59525+ }
59526+ if (!dir_emit(ctx, name,
59527 dentry->d_name.len,
59528 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59529 dentry->d_inode->i_mode >> 12)) {
59530@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59531 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59532 struct ceph_mds_client *mdsc = fsc->mdsc;
59533 unsigned frag = fpos_frag(ctx->pos);
59534- int off = fpos_off(ctx->pos);
59535+ unsigned int off = fpos_off(ctx->pos);
59536 int err;
59537 u32 ftype;
59538 struct ceph_mds_reply_info_parsed *rinfo;
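
The d_name copy above stabilizes the entry name before dir_emit() streams it to userspace: when the name is short enough to be stored inline in the dentry (name == dentry->d_iname), a concurrent rename can rewrite those bytes in place, so a snapshot is taken into a local buffer first (the hunk also widens off to unsigned int in ceph_readdir). A simplified model of the snapshot pattern (illustrative struct, no locking):

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry_model {
	char d_iname[DNAME_INLINE_LEN]; /* short names stored inline */
	const char *name;               /* points at d_iname or heap */
	unsigned int len;
};

/* Snapshot an inline name into a stable buffer before emitting, so a
 * concurrent in-place rewrite cannot change the bytes mid-copy. */
static void emit_name(const struct dentry_model *d)
{
	char stable[DNAME_INLINE_LEN];
	const char *name = d->name;

	if (name == d->d_iname) {
		memcpy(stable, name, d->len);
		name = stable;
	}
	printf("%.*s\n", (int)d->len, name);
}

int main(void)
{
	struct dentry_model d = { .d_iname = "example", .len = 7 };
	d.name = d.d_iname;
	emit_name(&d);
	return 0;
}
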
59539diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59540index 50f06cd..c7eba3e 100644
59541--- a/fs/ceph/super.c
59542+++ b/fs/ceph/super.c
59543@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59544 /*
59545 * construct our own bdi so we can control readahead, etc.
59546 */
59547-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59548+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59549
59550 static int ceph_register_bdi(struct super_block *sb,
59551 struct ceph_fs_client *fsc)
59552@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59553 default_backing_dev_info.ra_pages;
59554
59555 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59556- atomic_long_inc_return(&bdi_seq));
59557+ atomic_long_inc_return_unchecked(&bdi_seq));
59558 if (!err)
59559 sb->s_bdi = &fsc->backing_dev_info;
59560 return err;
59561diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59562index 7febcf2..62a5721 100644
59563--- a/fs/cifs/cifs_debug.c
59564+++ b/fs/cifs/cifs_debug.c
59565@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59566
59567 if (strtobool(&c, &bv) == 0) {
59568 #ifdef CONFIG_CIFS_STATS2
59569- atomic_set(&totBufAllocCount, 0);
59570- atomic_set(&totSmBufAllocCount, 0);
59571+ atomic_set_unchecked(&totBufAllocCount, 0);
59572+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59573 #endif /* CONFIG_CIFS_STATS2 */
59574 spin_lock(&cifs_tcp_ses_lock);
59575 list_for_each(tmp1, &cifs_tcp_ses_list) {
59576@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59577 tcon = list_entry(tmp3,
59578 struct cifs_tcon,
59579 tcon_list);
59580- atomic_set(&tcon->num_smbs_sent, 0);
59581+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59582 if (server->ops->clear_stats)
59583 server->ops->clear_stats(tcon);
59584 }
59585@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59586 smBufAllocCount.counter, cifs_min_small);
59587 #ifdef CONFIG_CIFS_STATS2
59588 seq_printf(m, "Total Large %d Small %d Allocations\n",
59589- atomic_read(&totBufAllocCount),
59590- atomic_read(&totSmBufAllocCount));
59591+ atomic_read_unchecked(&totBufAllocCount),
59592+ atomic_read_unchecked(&totSmBufAllocCount));
59593 #endif /* CONFIG_CIFS_STATS2 */
59594
59595 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59596@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59597 if (tcon->need_reconnect)
59598 seq_puts(m, "\tDISCONNECTED ");
59599 seq_printf(m, "\nSMBs: %d",
59600- atomic_read(&tcon->num_smbs_sent));
59601+ atomic_read_unchecked(&tcon->num_smbs_sent));
59602 if (server->ops->print_stats)
59603 server->ops->print_stats(m, tcon);
59604 }
59605diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59606index d72fe37..ded5511 100644
59607--- a/fs/cifs/cifsfs.c
59608+++ b/fs/cifs/cifsfs.c
59609@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59610 */
59611 cifs_req_cachep = kmem_cache_create("cifs_request",
59612 CIFSMaxBufSize + max_hdr_size, 0,
59613- SLAB_HWCACHE_ALIGN, NULL);
59614+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59615 if (cifs_req_cachep == NULL)
59616 return -ENOMEM;
59617
59618@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59619 efficient to alloc 1 per page off the slab compared to 17K (5page)
59620 alloc of large cifs buffers even when page debugging is on */
59621 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59622- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59623+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59624 NULL);
59625 if (cifs_sm_req_cachep == NULL) {
59626 mempool_destroy(cifs_req_poolp);
59627@@ -1204,8 +1204,8 @@ init_cifs(void)
59628 atomic_set(&bufAllocCount, 0);
59629 atomic_set(&smBufAllocCount, 0);
59630 #ifdef CONFIG_CIFS_STATS2
59631- atomic_set(&totBufAllocCount, 0);
59632- atomic_set(&totSmBufAllocCount, 0);
59633+ atomic_set_unchecked(&totBufAllocCount, 0);
59634+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59635 #endif /* CONFIG_CIFS_STATS2 */
59636
59637 atomic_set(&midCount, 0);
59638diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59639index 22b289a..bbbba08 100644
59640--- a/fs/cifs/cifsglob.h
59641+++ b/fs/cifs/cifsglob.h
59642@@ -823,35 +823,35 @@ struct cifs_tcon {
59643 __u16 Flags; /* optional support bits */
59644 enum statusEnum tidStatus;
59645 #ifdef CONFIG_CIFS_STATS
59646- atomic_t num_smbs_sent;
59647+ atomic_unchecked_t num_smbs_sent;
59648 union {
59649 struct {
59650- atomic_t num_writes;
59651- atomic_t num_reads;
59652- atomic_t num_flushes;
59653- atomic_t num_oplock_brks;
59654- atomic_t num_opens;
59655- atomic_t num_closes;
59656- atomic_t num_deletes;
59657- atomic_t num_mkdirs;
59658- atomic_t num_posixopens;
59659- atomic_t num_posixmkdirs;
59660- atomic_t num_rmdirs;
59661- atomic_t num_renames;
59662- atomic_t num_t2renames;
59663- atomic_t num_ffirst;
59664- atomic_t num_fnext;
59665- atomic_t num_fclose;
59666- atomic_t num_hardlinks;
59667- atomic_t num_symlinks;
59668- atomic_t num_locks;
59669- atomic_t num_acl_get;
59670- atomic_t num_acl_set;
59671+ atomic_unchecked_t num_writes;
59672+ atomic_unchecked_t num_reads;
59673+ atomic_unchecked_t num_flushes;
59674+ atomic_unchecked_t num_oplock_brks;
59675+ atomic_unchecked_t num_opens;
59676+ atomic_unchecked_t num_closes;
59677+ atomic_unchecked_t num_deletes;
59678+ atomic_unchecked_t num_mkdirs;
59679+ atomic_unchecked_t num_posixopens;
59680+ atomic_unchecked_t num_posixmkdirs;
59681+ atomic_unchecked_t num_rmdirs;
59682+ atomic_unchecked_t num_renames;
59683+ atomic_unchecked_t num_t2renames;
59684+ atomic_unchecked_t num_ffirst;
59685+ atomic_unchecked_t num_fnext;
59686+ atomic_unchecked_t num_fclose;
59687+ atomic_unchecked_t num_hardlinks;
59688+ atomic_unchecked_t num_symlinks;
59689+ atomic_unchecked_t num_locks;
59690+ atomic_unchecked_t num_acl_get;
59691+ atomic_unchecked_t num_acl_set;
59692 } cifs_stats;
59693 #ifdef CONFIG_CIFS_SMB2
59694 struct {
59695- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59696- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59697+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59698+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59699 } smb2_stats;
59700 #endif /* CONFIG_CIFS_SMB2 */
59701 } stats;
59702@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59703 }
59704
59705 #ifdef CONFIG_CIFS_STATS
59706-#define cifs_stats_inc atomic_inc
59707+#define cifs_stats_inc atomic_inc_unchecked
59708
59709 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59710 unsigned int bytes)
59711@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59712 /* Various Debug counters */
59713 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59714 #ifdef CONFIG_CIFS_STATS2
59715-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59716-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59717+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59718+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59719 #endif
59720 GLOBAL_EXTERN atomic_t smBufAllocCount;
59721 GLOBAL_EXTERN atomic_t midCount;
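The CIFS hunks above convert pure statistics counters from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t operations are instrumented to catch overflow (defeating reference-count-overflow exploits); counters whose wrap-around is harmless are switched to the unchecked variants so they neither trip the detector nor pay for it. A minimal userspace sketch of the distinction, using C11 atomics rather than the kernel's implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative only: a "checked" increment refuses to wrap, mirroring
 * what PAX_REFCOUNT enforces for atomic_t; the "unchecked" one is a
 * plain wrapping increment, fine for statistics. */
static bool checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX)
			return false;	/* would overflow: refuse */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return true;
}

static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);		/* wrap-around is acceptable */
}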
59722diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59723index 74f1287..7ef0237 100644
59724--- a/fs/cifs/file.c
59725+++ b/fs/cifs/file.c
59726@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
59727 index = mapping->writeback_index; /* Start from prev offset */
59728 end = -1;
59729 } else {
59730- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59731- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59732- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59733+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59734 range_whole = true;
59735+ index = 0;
59736+ end = ULONG_MAX;
59737+ } else {
59738+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59739+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59740+ }
59741 scanned = true;
59742 }
59743 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
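The cifs_writepages hunk stops deriving the page range by shifting the LLONG_MAX sentinel and instead sets index = 0, end = ULONG_MAX explicitly when the whole range is meant. The shift does not produce the pgoff_t "no limit" value, as this small demo shows (a PAGE_CACHE_SHIFT of 12 is assumed):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* end as the old code computed it vs. the explicit sentinel */
	unsigned long derived = (unsigned long)(LLONG_MAX >> 12);

	printf("derived end:  %lu\n", derived);		/* 2251799813685247 on LP64 */
	printf("explicit end: %lu\n", ULONG_MAX);	/* 18446744073709551615 */
	return 0;
}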
59744diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59745index 3379463..3af418a 100644
59746--- a/fs/cifs/misc.c
59747+++ b/fs/cifs/misc.c
59748@@ -170,7 +170,7 @@ cifs_buf_get(void)
59749 memset(ret_buf, 0, buf_size + 3);
59750 atomic_inc(&bufAllocCount);
59751 #ifdef CONFIG_CIFS_STATS2
59752- atomic_inc(&totBufAllocCount);
59753+ atomic_inc_unchecked(&totBufAllocCount);
59754 #endif /* CONFIG_CIFS_STATS2 */
59755 }
59756
59757@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59758 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59759 atomic_inc(&smBufAllocCount);
59760 #ifdef CONFIG_CIFS_STATS2
59761- atomic_inc(&totSmBufAllocCount);
59762+ atomic_inc_unchecked(&totSmBufAllocCount);
59763 #endif /* CONFIG_CIFS_STATS2 */
59764
59765 }
59766diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59767index d297903..1cb7516 100644
59768--- a/fs/cifs/smb1ops.c
59769+++ b/fs/cifs/smb1ops.c
59770@@ -622,27 +622,27 @@ static void
59771 cifs_clear_stats(struct cifs_tcon *tcon)
59772 {
59773 #ifdef CONFIG_CIFS_STATS
59774- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59775- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59776- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59777- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59778- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59779- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59780- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59781- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59782- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59783- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59784- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59785- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59786- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59787- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59788- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59789- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59790- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59791- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59792- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59793- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59794- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59795+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59796+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59797+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59798+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59799+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59800+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59801+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59802+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59803+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59804+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59805+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59806+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59807+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59808+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59809+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59810+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59811+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59812+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59813+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59814+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59815+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59816 #endif
59817 }
59818
59819@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59820 {
59821 #ifdef CONFIG_CIFS_STATS
59822 seq_printf(m, " Oplocks breaks: %d",
59823- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59824+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59825 seq_printf(m, "\nReads: %d Bytes: %llu",
59826- atomic_read(&tcon->stats.cifs_stats.num_reads),
59827+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59828 (long long)(tcon->bytes_read));
59829 seq_printf(m, "\nWrites: %d Bytes: %llu",
59830- atomic_read(&tcon->stats.cifs_stats.num_writes),
59831+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59832 (long long)(tcon->bytes_written));
59833 seq_printf(m, "\nFlushes: %d",
59834- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59835+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59836 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59837- atomic_read(&tcon->stats.cifs_stats.num_locks),
59838- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59839- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59840+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59841+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59842+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59843 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59844- atomic_read(&tcon->stats.cifs_stats.num_opens),
59845- atomic_read(&tcon->stats.cifs_stats.num_closes),
59846- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59847+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59848+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59849+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59850 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59851- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59852- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59853+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59854+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59855 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59856- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59857- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59858+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59859+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59860 seq_printf(m, "\nRenames: %d T2 Renames %d",
59861- atomic_read(&tcon->stats.cifs_stats.num_renames),
59862- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59863+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59864+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59865 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59866- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59867- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59868- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59869+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59870+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59871+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59872 #endif
59873 }
59874
59875diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59876index 96b5d40..e5db0c1 100644
59877--- a/fs/cifs/smb2ops.c
59878+++ b/fs/cifs/smb2ops.c
59879@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59880 #ifdef CONFIG_CIFS_STATS
59881 int i;
59882 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59883- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59884- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59885+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59886+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59887 }
59888 #endif
59889 }
59890@@ -459,65 +459,65 @@ static void
59891 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59892 {
59893 #ifdef CONFIG_CIFS_STATS
59894- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59895- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59896+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59897+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59898 seq_printf(m, "\nNegotiates: %d sent %d failed",
59899- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59900- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59901+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59902+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59903 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59904- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59905- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59906+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59907+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59908 seq_printf(m, "\nLogoffs: %d sent %d failed",
59909- atomic_read(&sent[SMB2_LOGOFF_HE]),
59910- atomic_read(&failed[SMB2_LOGOFF_HE]));
59911+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59912+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59913 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59914- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59915- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59916+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59917+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59918 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59919- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59920- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59921+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
59922+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
59923 seq_printf(m, "\nCreates: %d sent %d failed",
59924- atomic_read(&sent[SMB2_CREATE_HE]),
59925- atomic_read(&failed[SMB2_CREATE_HE]));
59926+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
59927+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
59928 seq_printf(m, "\nCloses: %d sent %d failed",
59929- atomic_read(&sent[SMB2_CLOSE_HE]),
59930- atomic_read(&failed[SMB2_CLOSE_HE]));
59931+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
59932+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
59933 seq_printf(m, "\nFlushes: %d sent %d failed",
59934- atomic_read(&sent[SMB2_FLUSH_HE]),
59935- atomic_read(&failed[SMB2_FLUSH_HE]));
59936+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
59937+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
59938 seq_printf(m, "\nReads: %d sent %d failed",
59939- atomic_read(&sent[SMB2_READ_HE]),
59940- atomic_read(&failed[SMB2_READ_HE]));
59941+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
59942+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
59943 seq_printf(m, "\nWrites: %d sent %d failed",
59944- atomic_read(&sent[SMB2_WRITE_HE]),
59945- atomic_read(&failed[SMB2_WRITE_HE]));
59946+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
59947+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
59948 seq_printf(m, "\nLocks: %d sent %d failed",
59949- atomic_read(&sent[SMB2_LOCK_HE]),
59950- atomic_read(&failed[SMB2_LOCK_HE]));
59951+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
59952+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
59953 seq_printf(m, "\nIOCTLs: %d sent %d failed",
59954- atomic_read(&sent[SMB2_IOCTL_HE]),
59955- atomic_read(&failed[SMB2_IOCTL_HE]));
59956+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
59957+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
59958 seq_printf(m, "\nCancels: %d sent %d failed",
59959- atomic_read(&sent[SMB2_CANCEL_HE]),
59960- atomic_read(&failed[SMB2_CANCEL_HE]));
59961+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
59962+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
59963 seq_printf(m, "\nEchos: %d sent %d failed",
59964- atomic_read(&sent[SMB2_ECHO_HE]),
59965- atomic_read(&failed[SMB2_ECHO_HE]));
59966+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
59967+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
59968 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
59969- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
59970- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
59971+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
59972+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
59973 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
59974- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
59975- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
59976+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
59977+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
59978 seq_printf(m, "\nQueryInfos: %d sent %d failed",
59979- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
59980- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
59981+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
59982+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
59983 seq_printf(m, "\nSetInfos: %d sent %d failed",
59984- atomic_read(&sent[SMB2_SET_INFO_HE]),
59985- atomic_read(&failed[SMB2_SET_INFO_HE]));
59986+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
59987+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
59988 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
59989- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
59990- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
59991+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
59992+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
59993 #endif
59994 }
59995
59996diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
59997index 3417340..b942390 100644
59998--- a/fs/cifs/smb2pdu.c
59999+++ b/fs/cifs/smb2pdu.c
60000@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60001 default:
60002 cifs_dbg(VFS, "info level %u isn't supported\n",
60003 srch_inf->info_level);
60004- rc = -EINVAL;
60005- goto qdir_exit;
60006+ return -EINVAL;
60007 }
60008
60009 req->FileIndex = cpu_to_le32(index);
60010diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60011index 46ee6f2..89a9e7f 100644
60012--- a/fs/coda/cache.c
60013+++ b/fs/coda/cache.c
60014@@ -24,7 +24,7 @@
60015 #include "coda_linux.h"
60016 #include "coda_cache.h"
60017
60018-static atomic_t permission_epoch = ATOMIC_INIT(0);
60019+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60020
60021 /* replace or extend an acl cache hit */
60022 void coda_cache_enter(struct inode *inode, int mask)
60023@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60024 struct coda_inode_info *cii = ITOC(inode);
60025
60026 spin_lock(&cii->c_lock);
60027- cii->c_cached_epoch = atomic_read(&permission_epoch);
60028+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60029 if (!uid_eq(cii->c_uid, current_fsuid())) {
60030 cii->c_uid = current_fsuid();
60031 cii->c_cached_perm = mask;
60032@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60033 {
60034 struct coda_inode_info *cii = ITOC(inode);
60035 spin_lock(&cii->c_lock);
60036- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60037+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60038 spin_unlock(&cii->c_lock);
60039 }
60040
60041 /* remove all acl caches */
60042 void coda_cache_clear_all(struct super_block *sb)
60043 {
60044- atomic_inc(&permission_epoch);
60045+ atomic_inc_unchecked(&permission_epoch);
60046 }
60047
60048
60049@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60050 spin_lock(&cii->c_lock);
60051 hit = (mask & cii->c_cached_perm) == mask &&
60052 uid_eq(cii->c_uid, current_fsuid()) &&
60053- cii->c_cached_epoch == atomic_read(&permission_epoch);
60054+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60055 spin_unlock(&cii->c_lock);
60056
60057 return hit;
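The coda change is a mechanical atomic_t to atomic_unchecked_t conversion, but the code it touches is a clean example of epoch-based invalidation: instead of walking every inode to clear cached permissions, a global generation counter is bumped and entries count as valid only while their recorded epoch matches. A self-contained sketch of the idiom:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int permission_epoch;

struct cached_perm {
	int mask;		/* cached permission bits */
	int epoch_seen;		/* epoch when the entry was filled */
};

static void cache_fill(struct cached_perm *c, int mask)
{
	c->mask = mask;
	c->epoch_seen = atomic_load(&permission_epoch);
}

static bool cache_valid(const struct cached_perm *c)
{
	return c->epoch_seen == atomic_load(&permission_epoch);
}

static void cache_clear_all(void)
{
	/* one increment lazily invalidates every cached entry */
	atomic_fetch_add(&permission_epoch, 1);
}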
60058diff --git a/fs/compat.c b/fs/compat.c
60059index 6fd272d..dd34ba2 100644
60060--- a/fs/compat.c
60061+++ b/fs/compat.c
60062@@ -54,7 +54,7 @@
60063 #include <asm/ioctls.h>
60064 #include "internal.h"
60065
60066-int compat_log = 1;
60067+int compat_log = 0;
60068
60069 int compat_printk(const char *fmt, ...)
60070 {
60071@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60072
60073 set_fs(KERNEL_DS);
60074 /* The __user pointer cast is valid because of the set_fs() */
60075- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60076+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60077 set_fs(oldfs);
60078 /* truncating is ok because it's a user address */
60079 if (!ret)
60080@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60081 goto out;
60082
60083 ret = -EINVAL;
60084- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60085+ if (nr_segs > UIO_MAXIOV)
60086 goto out;
60087 if (nr_segs > fast_segs) {
60088 ret = -ENOMEM;
60089@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60090 struct compat_readdir_callback {
60091 struct dir_context ctx;
60092 struct compat_old_linux_dirent __user *dirent;
60093+ struct file * file;
60094 int result;
60095 };
60096
60097@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60098 buf->result = -EOVERFLOW;
60099 return -EOVERFLOW;
60100 }
60101+
60102+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60103+ return 0;
60104+
60105 buf->result++;
60106 dirent = buf->dirent;
60107 if (!access_ok(VERIFY_WRITE, dirent,
60108@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60109 if (!f.file)
60110 return -EBADF;
60111
60112+ buf.file = f.file;
60113 error = iterate_dir(f.file, &buf.ctx);
60114 if (buf.result)
60115 error = buf.result;
60116@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60117 struct dir_context ctx;
60118 struct compat_linux_dirent __user *current_dir;
60119 struct compat_linux_dirent __user *previous;
60120+ struct file * file;
60121 int count;
60122 int error;
60123 };
60124@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60125 buf->error = -EOVERFLOW;
60126 return -EOVERFLOW;
60127 }
60128+
60129+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60130+ return 0;
60131+
60132 dirent = buf->previous;
60133 if (dirent) {
60134 if (__put_user(offset, &dirent->d_off))
60135@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60136 if (!f.file)
60137 return -EBADF;
60138
60139+ buf.file = f.file;
60140 error = iterate_dir(f.file, &buf.ctx);
60141 if (error >= 0)
60142 error = buf.error;
60143@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60144 struct dir_context ctx;
60145 struct linux_dirent64 __user *current_dir;
60146 struct linux_dirent64 __user *previous;
60147+ struct file * file;
60148 int count;
60149 int error;
60150 };
60151@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60152 buf->error = -EINVAL; /* only used if we fail.. */
60153 if (reclen > buf->count)
60154 return -EINVAL;
60155+
60156+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60157+ return 0;
60158+
60159 dirent = buf->previous;
60160
60161 if (dirent) {
60162@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60163 if (!f.file)
60164 return -EBADF;
60165
60166+ buf.file = f.file;
60167 error = iterate_dir(f.file, &buf.ctx);
60168 if (error >= 0)
60169 error = buf.error;
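Among the fs/compat.c changes above, the removal of the nr_segs < 0 test in compat_rw_copy_check_uvector is worth a second look: nr_segs is an unsigned long, so the comparison is always false, and a "negative" value from a caller wraps to a huge positive one that the UIO_MAXIOV bound already rejects. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long nr_segs = (unsigned long)-1;	/* "negative" input wraps */

	/* the first test can never fire; the bound check catches the wrap */
	printf("%d %d\n", nr_segs < 0, nr_segs > 1024);	/* prints: 0 1 */
	return 0;
}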
60170diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60171index 4d24d17..4f8c09e 100644
60172--- a/fs/compat_binfmt_elf.c
60173+++ b/fs/compat_binfmt_elf.c
60174@@ -30,11 +30,13 @@
60175 #undef elf_phdr
60176 #undef elf_shdr
60177 #undef elf_note
60178+#undef elf_dyn
60179 #undef elf_addr_t
60180 #define elfhdr elf32_hdr
60181 #define elf_phdr elf32_phdr
60182 #define elf_shdr elf32_shdr
60183 #define elf_note elf32_note
60184+#define elf_dyn Elf32_Dyn
60185 #define elf_addr_t Elf32_Addr
60186
60187 /*
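compat_binfmt_elf.c builds the native ELF loader a second time with its type macros re-pointed at the 32-bit variants. The patch adds elf_dyn to that macro set, presumably because its additions to binfmt_elf.c walk the dynamic section (for the PaX softmode flags) and the compat build must read Elf32_Dyn records rather than 64-bit ones. The record sizes differ, so without the remap every entry would be misparsed; a quick check with layouts matching the real ELF types:

#include <stdint.h>
#include <stdio.h>

/* stand-ins matching the Elf32_Dyn/Elf64_Dyn layouts: walking an array
 * of 32-bit records with the 64-bit type reads garbage */
typedef struct { int32_t d_tag; uint32_t d_val; } dyn32;
typedef struct { int64_t d_tag; uint64_t d_val; } dyn64;

int main(void)
{
	printf("32-bit record: %zu bytes, 64-bit record: %zu bytes\n",
	       sizeof(dyn32), sizeof(dyn64));		/* 8 vs 16 */
	return 0;
}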
60188diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60189index afec645..9c65620 100644
60190--- a/fs/compat_ioctl.c
60191+++ b/fs/compat_ioctl.c
60192@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60193 return -EFAULT;
60194 if (__get_user(udata, &ss32->iomem_base))
60195 return -EFAULT;
60196- ss.iomem_base = compat_ptr(udata);
60197+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60198 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60199 __get_user(ss.port_high, &ss32->port_high))
60200 return -EFAULT;
60201@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60202 for (i = 0; i < nmsgs; i++) {
60203 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60204 return -EFAULT;
60205- if (get_user(datap, &umsgs[i].buf) ||
60206- put_user(compat_ptr(datap), &tmsgs[i].buf))
60207+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60208+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60209 return -EFAULT;
60210 }
60211 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60212@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60213 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60214 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60215 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60216- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60217+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60218 return -EFAULT;
60219
60220 return ioctl_preallocate(file, p);
60221@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60222 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60223 {
60224 unsigned int a, b;
60225- a = *(unsigned int *)p;
60226- b = *(unsigned int *)q;
60227+ a = *(const unsigned int *)p;
60228+ b = *(const unsigned int *)q;
60229 if (a > b)
60230 return 1;
60231 if (a < b)
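The compat_ioctl.c comparator tweak only adds const to the casts, keeping init_sys32_ioctl_cmp honest about its qsort-style const void * parameters. For reference, the same comparator written const-correctly and branchlessly:

static int cmp_uint(const void *p, const void *q)
{
	unsigned int a = *(const unsigned int *)p;
	unsigned int b = *(const unsigned int *)q;

	/* (a > b) - (a < b): 1, 0 or -1 without an overflow-prone a - b */
	return (a > b) - (a < b);
}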
60232diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60233index c9c298b..544d100 100644
60234--- a/fs/configfs/dir.c
60235+++ b/fs/configfs/dir.c
60236@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60237 }
60238 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60239 struct configfs_dirent *next;
60240- const char *name;
60241+ const unsigned char * name;
60242+ char d_name[sizeof(next->s_dentry->d_iname)];
60243 int len;
60244 struct inode *inode = NULL;
60245
60246@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60247 continue;
60248
60249 name = configfs_get_name(next);
60250- len = strlen(name);
60251+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60252+ len = next->s_dentry->d_name.len;
60253+ memcpy(d_name, name, len);
60254+ name = d_name;
60255+ } else
60256+ len = strlen(name);
60257
60258 /*
60259 * We'll have a dentry and an inode for
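The configfs_readdir change guards against a concurrent rename rewriting a dentry's inline name while strlen() walks it: when the name lives in d_iname, the recorded d_name.len is used and the bytes are snapshotted into a stack buffer, so the read is bounded even if the string mutates underneath. The defensive pattern, reduced to userspace:

#include <string.h>

/* Illustrative: copy a possibly-mutating string using a length recorded
 * next to it instead of strlen() on the live buffer; stale bytes are
 * tolerable, running off the end is not. */
static size_t snapshot_name(char *dst, size_t dstsz,
			    const char *live, size_t recorded_len)
{
	size_t len = recorded_len < dstsz ? recorded_len : dstsz - 1;

	memcpy(dst, live, len);		/* bounded regardless of contents */
	dst[len] = '\0';
	return len;
}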
60260diff --git a/fs/coredump.c b/fs/coredump.c
60261index b5c86ff..0dac262 100644
60262--- a/fs/coredump.c
60263+++ b/fs/coredump.c
60264@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60265 struct pipe_inode_info *pipe = file->private_data;
60266
60267 pipe_lock(pipe);
60268- pipe->readers++;
60269- pipe->writers--;
60270+ atomic_inc(&pipe->readers);
60271+ atomic_dec(&pipe->writers);
60272 wake_up_interruptible_sync(&pipe->wait);
60273 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60274 pipe_unlock(pipe);
60275@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60276 * We actually want wait_event_freezable() but then we need
60277 * to clear TIF_SIGPENDING and improve dump_interrupted().
60278 */
60279- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60280+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60281
60282 pipe_lock(pipe);
60283- pipe->readers--;
60284- pipe->writers++;
60285+ atomic_dec(&pipe->readers);
60286+ atomic_inc(&pipe->writers);
60287 pipe_unlock(pipe);
60288 }
60289
60290@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60291 struct files_struct *displaced;
60292 bool need_nonrelative = false;
60293 bool core_dumped = false;
60294- static atomic_t core_dump_count = ATOMIC_INIT(0);
60295+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60296+ long signr = siginfo->si_signo;
60297+ int dumpable;
60298 struct coredump_params cprm = {
60299 .siginfo = siginfo,
60300 .regs = signal_pt_regs(),
60301@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60302 .mm_flags = mm->flags,
60303 };
60304
60305- audit_core_dumps(siginfo->si_signo);
60306+ audit_core_dumps(signr);
60307+
60308+ dumpable = __get_dumpable(cprm.mm_flags);
60309+
60310+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60311+ gr_handle_brute_attach(dumpable);
60312
60313 binfmt = mm->binfmt;
60314 if (!binfmt || !binfmt->core_dump)
60315 goto fail;
60316- if (!__get_dumpable(cprm.mm_flags))
60317+ if (!dumpable)
60318 goto fail;
60319
60320 cred = prepare_creds();
60321@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60322 need_nonrelative = true;
60323 }
60324
60325- retval = coredump_wait(siginfo->si_signo, &core_state);
60326+ retval = coredump_wait(signr, &core_state);
60327 if (retval < 0)
60328 goto fail_creds;
60329
60330@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60331 }
60332 cprm.limit = RLIM_INFINITY;
60333
60334- dump_count = atomic_inc_return(&core_dump_count);
60335+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60336 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60337 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60338 task_tgid_vnr(current), current->comm);
60339@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60340 } else {
60341 struct inode *inode;
60342
60343+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60344+
60345 if (cprm.limit < binfmt->min_coredump)
60346 goto fail_unlock;
60347
60348@@ -681,7 +690,7 @@ close_fail:
60349 filp_close(cprm.file, NULL);
60350 fail_dropcount:
60351 if (ispipe)
60352- atomic_dec(&core_dump_count);
60353+ atomic_dec_unchecked(&core_dump_count);
60354 fail_unlock:
60355 kfree(cn.corename);
60356 coredump_finish(mm, core_dumped);
60357@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60358 struct file *file = cprm->file;
60359 loff_t pos = file->f_pos;
60360 ssize_t n;
60361+
60362+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60363 if (cprm->written + nr > cprm->limit)
60364 return 0;
60365 while (nr) {
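wait_for_dump_helpers() juggles the pipe's readers/writers counts so that the usermode helper's exit wakes the dumping task; elsewhere this patch converts those pipe_inode_info fields to atomics, so the updates here follow suit. The bookkeeping, in miniature:

#include <stdatomic.h>

static atomic_int readers = 1, writers = 1;

/* while waiting for the usermode helper, pose as an extra reader so
 * the helper closing its end drops readers back to 1 and wakes us */
static void helpers_wait_enter(void)
{
	atomic_fetch_add(&readers, 1);
	atomic_fetch_sub(&writers, 1);
}

static void helpers_wait_exit(void)
{
	atomic_fetch_sub(&readers, 1);
	atomic_fetch_add(&writers, 1);
}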
60366diff --git a/fs/dcache.c b/fs/dcache.c
60367index e368d4f..b40ba59 100644
60368--- a/fs/dcache.c
60369+++ b/fs/dcache.c
60370@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60371 * dentry_iput drops the locks, at which point nobody (except
60372 * transient RCU lookups) can reach this dentry.
60373 */
60374- BUG_ON((int)dentry->d_lockref.count > 0);
60375+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60376 this_cpu_dec(nr_dentry);
60377 if (dentry->d_op && dentry->d_op->d_release)
60378 dentry->d_op->d_release(dentry);
60379@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60380 struct dentry *parent = dentry->d_parent;
60381 if (IS_ROOT(dentry))
60382 return NULL;
60383- if (unlikely((int)dentry->d_lockref.count < 0))
60384+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60385 return NULL;
60386 if (likely(spin_trylock(&parent->d_lock)))
60387 return parent;
60388@@ -638,7 +638,7 @@ repeat:
60389 dentry->d_flags |= DCACHE_REFERENCED;
60390 dentry_lru_add(dentry);
60391
60392- dentry->d_lockref.count--;
60393+ __lockref_dec(&dentry->d_lockref);
60394 spin_unlock(&dentry->d_lock);
60395 return;
60396
60397@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60398 /* This must be called with d_lock held */
60399 static inline void __dget_dlock(struct dentry *dentry)
60400 {
60401- dentry->d_lockref.count++;
60402+ __lockref_inc(&dentry->d_lockref);
60403 }
60404
60405 static inline void __dget(struct dentry *dentry)
60406@@ -694,8 +694,8 @@ repeat:
60407 goto repeat;
60408 }
60409 rcu_read_unlock();
60410- BUG_ON(!ret->d_lockref.count);
60411- ret->d_lockref.count++;
60412+ BUG_ON(!__lockref_read(&ret->d_lockref));
60413+ __lockref_inc(&ret->d_lockref);
60414 spin_unlock(&ret->d_lock);
60415 return ret;
60416 }
60417@@ -773,9 +773,9 @@ restart:
60418 spin_lock(&inode->i_lock);
60419 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60420 spin_lock(&dentry->d_lock);
60421- if (!dentry->d_lockref.count) {
60422+ if (!__lockref_read(&dentry->d_lockref)) {
60423 struct dentry *parent = lock_parent(dentry);
60424- if (likely(!dentry->d_lockref.count)) {
60425+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60426 __dentry_kill(dentry);
60427 dput(parent);
60428 goto restart;
60429@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60430 * We found an inuse dentry which was not removed from
60431 * the LRU because of laziness during lookup. Do not free it.
60432 */
60433- if ((int)dentry->d_lockref.count > 0) {
60434+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60435 spin_unlock(&dentry->d_lock);
60436 if (parent)
60437 spin_unlock(&parent->d_lock);
60438@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60439 dentry = parent;
60440 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60441 parent = lock_parent(dentry);
60442- if (dentry->d_lockref.count != 1) {
60443- dentry->d_lockref.count--;
60444+ if (__lockref_read(&dentry->d_lockref) != 1) {
59445+ __lockref_dec(&dentry->d_lockref);
60446 spin_unlock(&dentry->d_lock);
60447 if (parent)
60448 spin_unlock(&parent->d_lock);
60449@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60450 * counts, just remove them from the LRU. Otherwise give them
60451 * another pass through the LRU.
60452 */
60453- if (dentry->d_lockref.count) {
60454+ if (__lockref_read(&dentry->d_lockref) > 0) {
60455 d_lru_isolate(dentry);
60456 spin_unlock(&dentry->d_lock);
60457 return LRU_REMOVED;
60458@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60459 } else {
60460 if (dentry->d_flags & DCACHE_LRU_LIST)
60461 d_lru_del(dentry);
60462- if (!dentry->d_lockref.count) {
60463+ if (!__lockref_read(&dentry->d_lockref)) {
60464 d_shrink_add(dentry, &data->dispose);
60465 data->found++;
60466 }
60467@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60468 return D_WALK_CONTINUE;
60469
60470 /* root with refcount 1 is fine */
60471- if (dentry == _data && dentry->d_lockref.count == 1)
60472+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60473 return D_WALK_CONTINUE;
60474
60475 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60476@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60477 dentry->d_inode ?
60478 dentry->d_inode->i_ino : 0UL,
60479 dentry,
60480- dentry->d_lockref.count,
60481+ __lockref_read(&dentry->d_lockref),
60482 dentry->d_sb->s_type->name,
60483 dentry->d_sb->s_id);
60484 WARN_ON(1);
60485@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60486 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60487 if (name->len > DNAME_INLINE_LEN-1) {
60488 size_t size = offsetof(struct external_name, name[1]);
60489- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60490+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60491 if (!p) {
60492 kmem_cache_free(dentry_cache, dentry);
60493 return NULL;
60494@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60495 smp_wmb();
60496 dentry->d_name.name = dname;
60497
60498- dentry->d_lockref.count = 1;
60499+ __lockref_set(&dentry->d_lockref, 1);
60500 dentry->d_flags = 0;
60501 spin_lock_init(&dentry->d_lock);
60502 seqcount_init(&dentry->d_seq);
60503@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60504 dentry->d_sb = sb;
60505 dentry->d_op = NULL;
60506 dentry->d_fsdata = NULL;
60507+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60508+ atomic_set(&dentry->chroot_refcnt, 0);
60509+#endif
60510 INIT_HLIST_BL_NODE(&dentry->d_hash);
60511 INIT_LIST_HEAD(&dentry->d_lru);
60512 INIT_LIST_HEAD(&dentry->d_subdirs);
60513@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60514 goto next;
60515 }
60516
60517- dentry->d_lockref.count++;
60518+ __lockref_inc(&dentry->d_lockref);
60519 found = dentry;
60520 spin_unlock(&dentry->d_lock);
60521 break;
60522@@ -2250,7 +2253,7 @@ again:
60523 spin_lock(&dentry->d_lock);
60524 inode = dentry->d_inode;
60525 isdir = S_ISDIR(inode->i_mode);
60526- if (dentry->d_lockref.count == 1) {
60527+ if (__lockref_read(&dentry->d_lockref) == 1) {
60528 if (!spin_trylock(&inode->i_lock)) {
60529 spin_unlock(&dentry->d_lock);
60530 cpu_relax();
60531@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60532
60533 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60534 dentry->d_flags |= DCACHE_GENOCIDE;
60535- dentry->d_lockref.count--;
60536+ __lockref_dec(&dentry->d_lockref);
60537 }
60538 }
60539 return D_WALK_CONTINUE;
60540@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60541 mempages -= reserve;
60542
60543 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60544- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60545+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60546+ SLAB_NO_SANITIZE, NULL);
60547
60548 dcache_init();
60549 inode_init();
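Besides the mechanical d_lockref.count to __lockref_*() accessor conversion (note the shrink_dentry_list hunk drops a reference, hence the decrement), the dcache diff rounds the external-name allocation up to a multiple of sizeof(unsigned long). A plausible reading: the dcache compares names word-at-a-time and may touch the last partial word past the terminator, so padding the allocation keeps those reads inside the object once the patch's exact-size slab checks (usercopy, sanitization) are in force. The arithmetic, with a power-of-two round-up equivalent to the kernel's round_up():

#include <stdio.h>

#define round_up(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long size = 16, name_len = 5;	/* hypothetical lengths */

	printf("%lu -> %lu\n", size + name_len,
	       round_up(size + name_len, sizeof(unsigned long)));	/* 21 -> 24 on LP64 */
	return 0;
}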
60550diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60551index 6f0ce53..92bba36 100644
60552--- a/fs/debugfs/inode.c
60553+++ b/fs/debugfs/inode.c
60554@@ -423,10 +423,20 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60555 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
60556 * returned.
60557 */
60558+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60559+extern int grsec_enable_sysfs_restrict;
60560+#endif
60561+
60562 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60563 {
60564- return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60565- parent, NULL, NULL);
60566+ umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60567+
60568+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60569+ if (grsec_enable_sysfs_restrict)
60570+ mode = S_IFDIR | S_IRWXU;
60571+#endif
60572+
60573+ return __create_file(name, mode, parent, NULL, NULL);
60574 }
60575 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60576
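With GRKERNSEC_SYSFS_RESTRICT enabled at runtime, debugfs directories are created 0700 instead of 0755, so unprivileged users cannot even traverse them. The mode selection, isolated:

#include <sys/stat.h>

/* illustrative: grsec_enable_sysfs_restrict drops the group/other bits */
static mode_t debugfs_dir_mode(int restrict_enabled)
{
	return restrict_enabled ? (S_IFDIR | S_IRWXU)
				: (S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
}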
60577diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60578index 1686dc2..9611c50 100644
60579--- a/fs/ecryptfs/inode.c
60580+++ b/fs/ecryptfs/inode.c
60581@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60582 old_fs = get_fs();
60583 set_fs(get_ds());
60584 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60585- (char __user *)lower_buf,
60586+ (char __force_user *)lower_buf,
60587 PATH_MAX);
60588 set_fs(old_fs);
60589 if (rc < 0)
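The ecryptfs readlink call runs under set_fs(get_ds()), so a kernel buffer is deliberately passed where a __user pointer is expected; the patch's __force_user cast (defined as __force __user) records that intent for its stricter sparse-based address-space checking, a pattern that recurs throughout these hunks (kernel_read, the compat io_setup, and so on). How such annotations erase to nothing for the compiler yet stay visible to a checker, stubbed in userspace:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

/* stand-in for a syscall-style function that expects a user pointer */
static long fake_read(char __user *dst, long n) { (void)dst; return n; }

static long kernel_read_demo(char *kbuf, long n)
{
	/* with the kernel address space temporarily "user", the cast
	 * documents that handing over a kernel buffer is intentional */
	return fake_read((char __force_user *)kbuf, n);
}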
60590diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60591index e4141f2..d8263e8 100644
60592--- a/fs/ecryptfs/miscdev.c
60593+++ b/fs/ecryptfs/miscdev.c
60594@@ -304,7 +304,7 @@ check_list:
60595 goto out_unlock_msg_ctx;
60596 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60597 if (msg_ctx->msg) {
60598- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60599+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60600 goto out_unlock_msg_ctx;
60601 i += packet_length_size;
60602 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
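The miscdev fix adds a bound check before copying: packet_length_size is computed elsewhere, and if it ever exceeded sizeof(packet_length) the copy_to_user() would leak adjacent stack memory. Validating a computed length against the real size of its source object is the general pattern:

#include <string.h>

/* illustrative: refuse any length larger than the source object */
static int copy_bounded(char *dst, const char *src,
			size_t src_size, size_t len)
{
	if (len > src_size)
		return -1;		/* would overread 'src' */
	memcpy(dst, src, len);
	return 0;
}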
60603diff --git a/fs/exec.c b/fs/exec.c
60604index ad8798e..5f872c9 100644
60605--- a/fs/exec.c
60606+++ b/fs/exec.c
60607@@ -56,8 +56,20 @@
60608 #include <linux/pipe_fs_i.h>
60609 #include <linux/oom.h>
60610 #include <linux/compat.h>
60611+#include <linux/random.h>
60612+#include <linux/seq_file.h>
60613+#include <linux/coredump.h>
60614+#include <linux/mman.h>
60615+
60616+#ifdef CONFIG_PAX_REFCOUNT
60617+#include <linux/kallsyms.h>
60618+#include <linux/kdebug.h>
60619+#endif
60620+
60621+#include <trace/events/fs.h>
60622
60623 #include <asm/uaccess.h>
60624+#include <asm/sections.h>
60625 #include <asm/mmu_context.h>
60626 #include <asm/tlb.h>
60627
60628@@ -66,19 +78,34 @@
60629
60630 #include <trace/events/sched.h>
60631
60632+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60633+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60634+{
60635+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60636+}
60637+#endif
60638+
60639+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60640+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60641+EXPORT_SYMBOL(pax_set_initial_flags_func);
60642+#endif
60643+
60644 int suid_dumpable = 0;
60645
60646 static LIST_HEAD(formats);
60647 static DEFINE_RWLOCK(binfmt_lock);
60648
60649+extern int gr_process_kernel_exec_ban(void);
60650+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60651+
60652 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60653 {
60654 BUG_ON(!fmt);
60655 if (WARN_ON(!fmt->load_binary))
60656 return;
60657 write_lock(&binfmt_lock);
60658- insert ? list_add(&fmt->lh, &formats) :
60659- list_add_tail(&fmt->lh, &formats);
60660+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60661+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60662 write_unlock(&binfmt_lock);
60663 }
60664
60665@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60666 void unregister_binfmt(struct linux_binfmt * fmt)
60667 {
60668 write_lock(&binfmt_lock);
60669- list_del(&fmt->lh);
60670+ pax_list_del((struct list_head *)&fmt->lh);
60671 write_unlock(&binfmt_lock);
60672 }
60673
60674@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60675 int write)
60676 {
60677 struct page *page;
60678- int ret;
60679
60680-#ifdef CONFIG_STACK_GROWSUP
60681- if (write) {
60682- ret = expand_downwards(bprm->vma, pos);
60683- if (ret < 0)
60684- return NULL;
60685- }
60686-#endif
60687- ret = get_user_pages(current, bprm->mm, pos,
60688- 1, write, 1, &page, NULL);
60689- if (ret <= 0)
60690+ if (0 > expand_downwards(bprm->vma, pos))
60691+ return NULL;
60692+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60693 return NULL;
60694
60695 if (write) {
60696@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60697 if (size <= ARG_MAX)
60698 return page;
60699
60700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60701+ // only allow 512KB for argv+env on suid/sgid binaries
60702+ // to prevent easy ASLR exhaustion
60703+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60704+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60705+ (size > (512 * 1024))) {
60706+ put_page(page);
60707+ return NULL;
60708+ }
60709+#endif
60710+
60711 /*
60712 * Limit to 1/4-th the stack size for the argv+env strings.
60713 * This ensures that:
60714@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60715 vma->vm_end = STACK_TOP_MAX;
60716 vma->vm_start = vma->vm_end - PAGE_SIZE;
60717 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60718+
60719+#ifdef CONFIG_PAX_SEGMEXEC
60720+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60721+#endif
60722+
60723 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60724 INIT_LIST_HEAD(&vma->anon_vma_chain);
60725
60726@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60727 arch_bprm_mm_init(mm, vma);
60728 up_write(&mm->mmap_sem);
60729 bprm->p = vma->vm_end - sizeof(void *);
60730+
60731+#ifdef CONFIG_PAX_RANDUSTACK
60732+ if (randomize_va_space)
60733+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60734+#endif
60735+
60736 return 0;
60737 err:
60738 up_write(&mm->mmap_sem);
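Two of the additions above harden the early exec stack setup: under GRKERNSEC_PROC_MEMMAP, a set[ug]id exec caps argv+env at 512 KB so an attacker-supplied environment cannot eat into stack ASLR, and under PAX_RANDUSTACK the initial bprm->p gets a random sub-page offset. Restated in miniature (userspace stand-ins, 4 KB pages assumed):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define PAGE_MASK (~4095UL)

/* the 512 KB argv+env cap applies only across a credential change */
static bool argv_env_too_big(bool cred_change, size_t size)
{
	return cred_change && size > 512 * 1024;
}

/* jitter the stack pointer base within its page, as RANDUSTACK does */
static unsigned long randomize_stack_p(unsigned long p)
{
	return p ^ ((unsigned long)rand() & ~PAGE_MASK);
}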
60739@@ -396,7 +437,7 @@ struct user_arg_ptr {
60740 } ptr;
60741 };
60742
60743-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60744+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60745 {
60746 const char __user *native;
60747
60748@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60749 compat_uptr_t compat;
60750
60751 if (get_user(compat, argv.ptr.compat + nr))
60752- return ERR_PTR(-EFAULT);
60753+ return (const char __force_user *)ERR_PTR(-EFAULT);
60754
60755 return compat_ptr(compat);
60756 }
60757 #endif
60758
60759 if (get_user(native, argv.ptr.native + nr))
60760- return ERR_PTR(-EFAULT);
60761+ return (const char __force_user *)ERR_PTR(-EFAULT);
60762
60763 return native;
60764 }
60765@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60766 if (!p)
60767 break;
60768
60769- if (IS_ERR(p))
60770+ if (IS_ERR((const char __force_kernel *)p))
60771 return -EFAULT;
60772
60773 if (i >= max)
60774@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60775
60776 ret = -EFAULT;
60777 str = get_user_arg_ptr(argv, argc);
60778- if (IS_ERR(str))
60779+ if (IS_ERR((const char __force_kernel *)str))
60780 goto out;
60781
60782 len = strnlen_user(str, MAX_ARG_STRLEN);
60783@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60784 int r;
60785 mm_segment_t oldfs = get_fs();
60786 struct user_arg_ptr argv = {
60787- .ptr.native = (const char __user *const __user *)__argv,
60788+ .ptr.native = (const char __user * const __force_user *)__argv,
60789 };
60790
60791 set_fs(KERNEL_DS);
60792@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60793 unsigned long new_end = old_end - shift;
60794 struct mmu_gather tlb;
60795
60796- BUG_ON(new_start > new_end);
60797+ if (new_start >= new_end || new_start < mmap_min_addr)
60798+ return -ENOMEM;
60799
60800 /*
60801 * ensure there are no vmas between where we want to go
60802@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60803 if (vma != find_vma(mm, new_start))
60804 return -EFAULT;
60805
60806+#ifdef CONFIG_PAX_SEGMEXEC
60807+ BUG_ON(pax_find_mirror_vma(vma));
60808+#endif
60809+
60810 /*
60811 * cover the whole range: [new_start, old_end)
60812 */
60813@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60814 stack_top = arch_align_stack(stack_top);
60815 stack_top = PAGE_ALIGN(stack_top);
60816
60817- if (unlikely(stack_top < mmap_min_addr) ||
60818- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60819- return -ENOMEM;
60820-
60821 stack_shift = vma->vm_end - stack_top;
60822
60823 bprm->p -= stack_shift;
60824@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60825 bprm->exec -= stack_shift;
60826
60827 down_write(&mm->mmap_sem);
60828+
60829+ /* Move stack pages down in memory. */
60830+ if (stack_shift) {
60831+ ret = shift_arg_pages(vma, stack_shift);
60832+ if (ret)
60833+ goto out_unlock;
60834+ }
60835+
60836 vm_flags = VM_STACK_FLAGS;
60837
60838+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60839+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60840+ vm_flags &= ~VM_EXEC;
60841+
60842+#ifdef CONFIG_PAX_MPROTECT
60843+ if (mm->pax_flags & MF_PAX_MPROTECT)
60844+ vm_flags &= ~VM_MAYEXEC;
60845+#endif
60846+
60847+ }
60848+#endif
60849+
60850 /*
60851 * Adjust stack execute permissions; explicitly enable for
60852 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60853@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60854 goto out_unlock;
60855 BUG_ON(prev != vma);
60856
60857- /* Move stack pages down in memory. */
60858- if (stack_shift) {
60859- ret = shift_arg_pages(vma, stack_shift);
60860- if (ret)
60861- goto out_unlock;
60862- }
60863-
60864 /* mprotect_fixup is overkill to remove the temporary stack flags */
60865 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60866
60867@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60868 #endif
60869 current->mm->start_stack = bprm->p;
60870 ret = expand_stack(vma, stack_base);
60871+
60872+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60873+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60874+ unsigned long size;
60875+ vm_flags_t vm_flags;
60876+
60877+ size = STACK_TOP - vma->vm_end;
60878+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60879+
60880+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60881+
60882+#ifdef CONFIG_X86
60883+ if (!ret) {
60884+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60885+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60886+ }
60887+#endif
60888+
60889+ }
60890+#endif
60891+
60892 if (ret)
60893 ret = -EFAULT;
60894
60895@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60896 if (err)
60897 goto exit;
60898
60899- if (name->name[0] != '\0')
60900+ if (name->name[0] != '\0') {
60901 fsnotify_open(file);
60902+ trace_open_exec(name->name);
60903+ }
60904
60905 out:
60906 return file;
60907@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60908 old_fs = get_fs();
60909 set_fs(get_ds());
60910 /* The cast to a user pointer is valid due to the set_fs() */
60911- result = vfs_read(file, (void __user *)addr, count, &pos);
60912+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60913 set_fs(old_fs);
60914 return result;
60915 }
60916@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60917 tsk->mm = mm;
60918 tsk->active_mm = mm;
60919 activate_mm(active_mm, mm);
60920+ populate_stack();
60921 tsk->mm->vmacache_seqnum = 0;
60922 vmacache_flush(tsk);
60923 task_unlock(tsk);
60924@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
60925 }
60926 rcu_read_unlock();
60927
60928- if (p->fs->users > n_fs)
60929+ if (atomic_read(&p->fs->users) > n_fs)
60930 bprm->unsafe |= LSM_UNSAFE_SHARE;
60931 else
60932 p->fs->in_exec = 1;
60933@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
60934 return ret;
60935 }
60936
60937+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60938+static DEFINE_PER_CPU(u64, exec_counter);
60939+static int __init init_exec_counters(void)
60940+{
60941+ unsigned int cpu;
60942+
60943+ for_each_possible_cpu(cpu) {
60944+ per_cpu(exec_counter, cpu) = (u64)cpu;
60945+ }
60946+
60947+ return 0;
60948+}
60949+early_initcall(init_exec_counters);
60950+static inline void increment_exec_counter(void)
60951+{
60952+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
60953+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
60954+}
60955+#else
60956+static inline void increment_exec_counter(void) {}
60957+#endif
60958+
60959+extern void gr_handle_exec_args(struct linux_binprm *bprm,
60960+ struct user_arg_ptr argv);
60961+
60962 /*
60963 * sys_execve() executes a new program.
60964 */
60965@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60966 struct user_arg_ptr envp,
60967 int flags)
60968 {
60969+#ifdef CONFIG_GRKERNSEC
60970+ struct file *old_exec_file;
60971+ struct acl_subject_label *old_acl;
60972+ struct rlimit old_rlim[RLIM_NLIMITS];
60973+#endif
60974 char *pathbuf = NULL;
60975 struct linux_binprm *bprm;
60976 struct file *file;
60977@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
60978 if (IS_ERR(filename))
60979 return PTR_ERR(filename);
60980
60981+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
60982+
60983 /*
60984 * We move the actual failure in case of RLIMIT_NPROC excess from
60985 * set*uid() to execve() because too many poorly written programs
60986@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60987 if (IS_ERR(file))
60988 goto out_unmark;
60989
60990+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
60991+ retval = -EPERM;
60992+ goto out_unmark;
60993+ }
60994+
60995 sched_exec();
60996
60997 bprm->file = file;
60998@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60999 }
61000 bprm->interp = bprm->filename;
61001
61002+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61003+ retval = -EACCES;
61004+ goto out_unmark;
61005+ }
61006+
61007 retval = bprm_mm_init(bprm);
61008 if (retval)
61009 goto out_unmark;
61010@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
61011 if (retval < 0)
61012 goto out;
61013
61014+#ifdef CONFIG_GRKERNSEC
61015+ old_acl = current->acl;
61016+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61017+ old_exec_file = current->exec_file;
61018+ get_file(file);
61019+ current->exec_file = file;
61020+#endif
61021+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61022+ /* limit suid stack to 8MB
61023+ * we saved the old limits above and will restore them if this exec fails
61024+ */
61025+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61026+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61027+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61028+#endif
61029+
61030+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61031+ retval = -EPERM;
61032+ goto out_fail;
61033+ }
61034+
61035+ if (!gr_tpe_allow(file)) {
61036+ retval = -EACCES;
61037+ goto out_fail;
61038+ }
61039+
61040+ if (gr_check_crash_exec(file)) {
61041+ retval = -EACCES;
61042+ goto out_fail;
61043+ }
61044+
61045+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61046+ bprm->unsafe);
61047+ if (retval < 0)
61048+ goto out_fail;
61049+
61050 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61051 if (retval < 0)
61052- goto out;
61053+ goto out_fail;
61054
61055 bprm->exec = bprm->p;
61056 retval = copy_strings(bprm->envc, envp, bprm);
61057 if (retval < 0)
61058- goto out;
61059+ goto out_fail;
61060
61061 retval = copy_strings(bprm->argc, argv, bprm);
61062 if (retval < 0)
61063- goto out;
61064+ goto out_fail;
61065+
61066+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61067+
61068+ gr_handle_exec_args(bprm, argv);
61069
61070 retval = exec_binprm(bprm);
61071 if (retval < 0)
61072- goto out;
61073+ goto out_fail;
61074+#ifdef CONFIG_GRKERNSEC
61075+ if (old_exec_file)
61076+ fput(old_exec_file);
61077+#endif
61078
61079 /* execve succeeded */
61080+
61081+ increment_exec_counter();
61082 current->fs->in_exec = 0;
61083 current->in_execve = 0;
61084 acct_update_integrals(current);
61085@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61086 put_files_struct(displaced);
61087 return retval;
61088
61089+out_fail:
61090+#ifdef CONFIG_GRKERNSEC
61091+ current->acl = old_acl;
61092+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61093+ fput(current->exec_file);
61094+ current->exec_file = old_exec_file;
61095+#endif
61096+
61097 out:
61098 if (bprm->mm) {
61099 acct_arg_size(bprm, 0);
61100@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61101 argv, envp, flags);
61102 }
61103 #endif
61104+
61105+int pax_check_flags(unsigned long *flags)
61106+{
61107+ int retval = 0;
61108+
61109+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61110+ if (*flags & MF_PAX_SEGMEXEC)
61111+ {
61112+ *flags &= ~MF_PAX_SEGMEXEC;
61113+ retval = -EINVAL;
61114+ }
61115+#endif
61116+
61117+ if ((*flags & MF_PAX_PAGEEXEC)
61118+
61119+#ifdef CONFIG_PAX_PAGEEXEC
61120+ && (*flags & MF_PAX_SEGMEXEC)
61121+#endif
61122+
61123+ )
61124+ {
61125+ *flags &= ~MF_PAX_PAGEEXEC;
61126+ retval = -EINVAL;
61127+ }
61128+
61129+ if ((*flags & MF_PAX_MPROTECT)
61130+
61131+#ifdef CONFIG_PAX_MPROTECT
61132+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61133+#endif
61134+
61135+ )
61136+ {
61137+ *flags &= ~MF_PAX_MPROTECT;
61138+ retval = -EINVAL;
61139+ }
61140+
61141+ if ((*flags & MF_PAX_EMUTRAMP)
61142+
61143+#ifdef CONFIG_PAX_EMUTRAMP
61144+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61145+#endif
61146+
61147+ )
61148+ {
61149+ *flags &= ~MF_PAX_EMUTRAMP;
61150+ retval = -EINVAL;
61151+ }
61152+
61153+ return retval;
61154+}
61155+
61156+EXPORT_SYMBOL(pax_check_flags);
61157+
61158+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61159+char *pax_get_path(const struct path *path, char *buf, int buflen)
61160+{
61161+ char *pathname = d_path(path, buf, buflen);
61162+
61163+ if (IS_ERR(pathname))
61164+ goto toolong;
61165+
61166+ pathname = mangle_path(buf, pathname, "\t\n\\");
61167+ if (!pathname)
61168+ goto toolong;
61169+
61170+ *pathname = 0;
61171+ return buf;
61172+
61173+toolong:
61174+ return "<path too long>";
61175+}
61176+EXPORT_SYMBOL(pax_get_path);
61177+
61178+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61179+{
61180+ struct task_struct *tsk = current;
61181+ struct mm_struct *mm = current->mm;
61182+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61183+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61184+ char *path_exec = NULL;
61185+ char *path_fault = NULL;
61186+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61187+ siginfo_t info = { };
61188+
61189+ if (buffer_exec && buffer_fault) {
61190+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61191+
61192+ down_read(&mm->mmap_sem);
61193+ vma = mm->mmap;
61194+ while (vma && (!vma_exec || !vma_fault)) {
61195+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61196+ vma_exec = vma;
61197+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61198+ vma_fault = vma;
61199+ vma = vma->vm_next;
61200+ }
61201+ if (vma_exec)
61202+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61203+ if (vma_fault) {
61204+ start = vma_fault->vm_start;
61205+ end = vma_fault->vm_end;
61206+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61207+ if (vma_fault->vm_file)
61208+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61209+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61210+ path_fault = "<heap>";
61211+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61212+ path_fault = "<stack>";
61213+ else
61214+ path_fault = "<anonymous mapping>";
61215+ }
61216+ up_read(&mm->mmap_sem);
61217+ }
61218+ if (tsk->signal->curr_ip)
61219+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61220+ else
61221+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61222+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61223+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61224+ free_page((unsigned long)buffer_exec);
61225+ free_page((unsigned long)buffer_fault);
61226+ pax_report_insns(regs, pc, sp);
61227+ info.si_signo = SIGKILL;
61228+ info.si_errno = 0;
61229+ info.si_code = SI_KERNEL;
61230+ info.si_pid = 0;
61231+ info.si_uid = 0;
61232+ do_coredump(&info);
61233+}
61234+#endif
61235+
61236+#ifdef CONFIG_PAX_REFCOUNT
61237+void pax_report_refcount_overflow(struct pt_regs *regs)
61238+{
61239+ if (current->signal->curr_ip)
61240+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61241+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61242+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61243+ else
61244+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61245+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61246+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61247+ preempt_disable();
61248+ show_regs(regs);
61249+ preempt_enable();
61250+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61251+}
61252+#endif
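
/* Editor's sketch -- not part of the patch. The reporting function
 * above only runs after the PAX_REFCOUNT instrumentation has caught a
 * wrapping increment; the real detection uses arch-specific
 * overflow-checked assembly elsewhere in this patch. The idea in
 * plain C, roughly: */
static inline void refcount_inc_checked(atomic_t *v)
{
	if (unlikely(atomic_inc_return(v) < 0)) {	/* wrapped past INT_MAX */
		atomic_dec(v);				/* saturate instead of wrapping around */
		/* the instrumented kernel traps here and ends up in
		 * pax_report_refcount_overflow() above */
	}
}
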
61253+
61254+#ifdef CONFIG_PAX_USERCOPY
61255+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61256+static noinline int check_stack_object(const void *obj, unsigned long len)
61257+{
61258+ const void * const stack = task_stack_page(current);
61259+ const void * const stackend = stack + THREAD_SIZE;
61260+
61261+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61262+ const void *frame = NULL;
61263+ const void *oldframe;
61264+#endif
61265+
61266+ if (obj + len < obj)
61267+ return -1;
61268+
61269+ if (obj + len <= stack || stackend <= obj)
61270+ return 0;
61271+
61272+ if (obj < stack || stackend < obj + len)
61273+ return -1;
61274+
61275+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61276+ oldframe = __builtin_frame_address(1);
61277+ if (oldframe)
61278+ frame = __builtin_frame_address(2);
61279+ /*
61280+ low ----------------------------------------------> high
61281+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61282+ ^----------------^
61283+ allow copies only within here
61284+ */
61285+ while (stack <= frame && frame < stackend) {
61286+ /* if obj + len extends past the last frame, this
61287+ check won't pass and the next frame will be 0,
61288+ causing us to bail out and correctly report
61289+ the copy as invalid
61290+ */
61291+ if (obj + len <= frame)
61292+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61293+ oldframe = frame;
61294+ frame = *(const void * const *)frame;
61295+ }
61296+ return -1;
61297+#else
61298+ return 1;
61299+#endif
61300+}
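
/* Editor's sketch -- not part of the patch. A minimal userspace
 * rendering of the interval test check_stack_object() applies above,
 * with invented addresses; the frame-walking refinement (return value
 * 2) is omitted. */
#include <stdint.h>
#include <stdio.h>

/* 0: entirely outside the stack, 1: fully inside, -1: partial overlap */
static int classify(uintptr_t obj, unsigned long len,
		    uintptr_t stack, uintptr_t stackend)
{
	if (obj + len < obj)
		return -1;				/* length wraps the address space */
	if (obj + len <= stack || stackend <= obj)
		return 0;				/* no overlap at all */
	if (obj < stack || stackend < obj + len)
		return -1;				/* straddles a stack boundary */
	return 1;					/* fully contained */
}

int main(void)
{
	uintptr_t stack = 0x1000, stackend = 0x3000;
	printf("%d\n", classify(0x1100, 32, stack, stackend));	/* 1 */
	printf("%d\n", classify(0x0ff0, 64, stack, stackend));	/* -1 */
	printf("%d\n", classify(0x4000, 16, stack, stackend));	/* 0 */
	return 0;
}
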
61301+
61302+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61303+{
61304+ if (current->signal->curr_ip)
61305+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61306+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61307+ else
61308+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61309+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61310+ dump_stack();
61311+ gr_handle_kernel_exploit();
61312+ do_group_exit(SIGKILL);
61313+}
61314+#endif
61315+
61316+#ifdef CONFIG_PAX_USERCOPY
61317+
61318+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61319+{
61320+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61321+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61322+#ifdef CONFIG_MODULES
61323+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61324+#else
61325+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61326+#endif
61327+
61328+#else
61329+ unsigned long textlow = (unsigned long)_stext;
61330+ unsigned long texthigh = (unsigned long)_etext;
61331+
61332+#ifdef CONFIG_X86_64
61333+ /* check against linear mapping as well */
61334+ if (high > (unsigned long)__va(__pa(textlow)) &&
61335+ low < (unsigned long)__va(__pa(texthigh)))
61336+ return true;
61337+#endif
61338+
61339+#endif
61340+
61341+ if (high <= textlow || low >= texthigh)
61342+ return false;
61343+ else
61344+ return true;
61345+}
61346+#endif
61347+
61348+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61349+{
61350+#ifdef CONFIG_PAX_USERCOPY
61351+ const char *type;
61352+#endif
61353+
61354+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61355+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61356+ unsigned long currentsp = (unsigned long)&stackstart;
61357+ if (unlikely((currentsp < stackstart + 512 ||
61358+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61359+ BUG();
61360+#endif
61361+
61362+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61363+ if (const_size)
61364+ return;
61365+#endif
61366+
61367+#ifdef CONFIG_PAX_USERCOPY
61368+ if (!n)
61369+ return;
61370+
61371+ type = check_heap_object(ptr, n);
61372+ if (!type) {
61373+ int ret = check_stack_object(ptr, n);
61374+ if (ret == 1 || ret == 2)
61375+ return;
61376+ if (ret == 0) {
61377+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61378+ type = "<kernel text>";
61379+ else
61380+ return;
61381+ } else
61382+ type = "<process stack>";
61383+ }
61384+
61385+ pax_report_usercopy(ptr, n, to_user, type);
61386+#endif
61387+
61388+}
61389+EXPORT_SYMBOL(__check_object_size);
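
/* Editor's sketch -- not part of the patch. The uaccess wrappers
 * elsewhere in this patch are what feed __check_object_size(); the
 * wrapper name below is invented and the const_size argument is an
 * assumption, but the call shape follows the definition above. */
static inline unsigned long
checked_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* refuse to copy out of heap objects, the stack, or kernel text */
	__check_object_size(from, n, true, __builtin_constant_p(n));
	return copy_to_user(to, from, n);
}
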
61390+
61391+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61392+void pax_track_stack(void)
61393+{
61394+ unsigned long sp = (unsigned long)&sp;
61395+ if (sp < current_thread_info()->lowest_stack &&
61396+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61397+ current_thread_info()->lowest_stack = sp;
61398+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61399+ BUG();
61400+}
61401+EXPORT_SYMBOL(pax_track_stack);
61402+#endif
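
/* Editor's note: the (unsigned long)&sp idiom above approximates the
 * stack pointer portably -- a fresh local's address sits near the top
 * of the current frame. A userspace illustration (the recursion depth
 * is made up): */
#include <stdio.h>

static unsigned long current_sp(void)
{
	unsigned long sp = (unsigned long)&sp;	/* same trick as pax_track_stack() */
	return sp;
}

static void deeper(int n, unsigned long *lowest)
{
	unsigned long sp = current_sp();
	if (sp < *lowest)
		*lowest = sp;
	if (n)
		deeper(n - 1, lowest);
}

int main(void)
{
	unsigned long lowest = (unsigned long)-1;
	deeper(8, &lowest);
	printf("lowest stack address seen: %#lx\n", lowest);
	return 0;
}
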
61403+
61404+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61405+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61406+{
61407+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61408+ dump_stack();
61409+ do_group_exit(SIGKILL);
61410+}
61411+EXPORT_SYMBOL(report_size_overflow);
61412+#endif
61413diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61414index 9f9992b..8b59411 100644
61415--- a/fs/ext2/balloc.c
61416+++ b/fs/ext2/balloc.c
61417@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61418
61419 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61420 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61421- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61422+ if (free_blocks < root_blocks + 1 &&
61423 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61424 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61425- !in_group_p (sbi->s_resgid))) {
61426+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61427 return 0;
61428 }
61429 return 1;
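
The ext2 hunk above (and its ext3/ext4 twins below) reorders the reserved-blocks test so that the capability check runs last and is swapped for the non-logging capable_nolog(). Since && short-circuits, the side-effecting predicate now only evaluates once every cheaper condition has already held, i.e. only when root's reserve would actually be consumed. A hypothetical miniature of the pattern (names invented):

static int audit_capable(void)
{
	/* stands in for capable(): may emit an audit/learning record */
	return 1;
}

static int may_use_reserve(int is_res_uid, int is_res_gid)
{
	/* cheap, side-effect-free tests first; the noisy one last */
	return !is_res_uid && !is_res_gid && audit_capable();
}

int main(void)
{
	return !may_use_reserve(0, 0);	/* exits 0 when the reserve is usable */
}
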
61430diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61431index ae55fdd..5e64c27 100644
61432--- a/fs/ext2/super.c
61433+++ b/fs/ext2/super.c
61434@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61435 #ifdef CONFIG_EXT2_FS_XATTR
61436 if (test_opt(sb, XATTR_USER))
61437 seq_puts(seq, ",user_xattr");
61438- if (!test_opt(sb, XATTR_USER) &&
61439- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61440+ if (!test_opt(sb, XATTR_USER))
61441 seq_puts(seq, ",nouser_xattr");
61442- }
61443 #endif
61444
61445 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61446@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61447 if (def_mount_opts & EXT2_DEFM_UID16)
61448 set_opt(sbi->s_mount_opt, NO_UID32);
61449 #ifdef CONFIG_EXT2_FS_XATTR
61450- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61451- set_opt(sbi->s_mount_opt, XATTR_USER);
61452+ /* always enable user xattrs */
61453+ set_opt(sbi->s_mount_opt, XATTR_USER);
61454 #endif
61455 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61456 if (def_mount_opts & EXT2_DEFM_ACL)
61457diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61458index 9142614..97484fa 100644
61459--- a/fs/ext2/xattr.c
61460+++ b/fs/ext2/xattr.c
61461@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61462 struct buffer_head *bh = NULL;
61463 struct ext2_xattr_entry *entry;
61464 char *end;
61465- size_t rest = buffer_size;
61466+ size_t rest = buffer_size, total_size = 0;
61467 int error;
61468
61469 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61470@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61471 buffer += size;
61472 }
61473 rest -= size;
61474+ total_size += size;
61475 }
61476 }
61477- error = buffer_size - rest; /* total size */
61478+ error = total_size;
61479
61480 cleanup:
61481 brelse(bh);
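
The xattr change above (repeated for ext3 and ext4 below) replaces a derived total with a directly accumulated one. In the size-probe case -- listxattr called with a zero-length buffer -- rest wraps below zero on the first entry, and buffer_size - rest only comes out right through unsigned modular arithmetic; the explicit total_size accumulator needs no such reasoning and sits better with the size-overflow instrumentation added elsewhere in this patch. A userspace demonstration with invented entry sizes:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t buffer_size = 0;			/* listxattr size probe */
	size_t rest = buffer_size, total_size = 0;
	size_t sizes[] = { 12, 7, 30 };

	for (int i = 0; i < 3; i++) {
		rest -= sizes[i];		/* wraps immediately when probing */
		total_size += sizes[i];
	}
	/* both print 49, but only the accumulator is obviously correct */
	printf("%zu %zu\n", buffer_size - rest, total_size);
	return 0;
}
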
61482diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61483index 158b5d4..2432610 100644
61484--- a/fs/ext3/balloc.c
61485+++ b/fs/ext3/balloc.c
61486@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61487
61488 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61489 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61490- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61491+ if (free_blocks < root_blocks + 1 &&
61492 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61493 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61494- !in_group_p (sbi->s_resgid))) {
61495+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61496 return 0;
61497 }
61498 return 1;
61499diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61500index 9b4e7d7..048d025 100644
61501--- a/fs/ext3/super.c
61502+++ b/fs/ext3/super.c
61503@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61504 #ifdef CONFIG_EXT3_FS_XATTR
61505 if (test_opt(sb, XATTR_USER))
61506 seq_puts(seq, ",user_xattr");
61507- if (!test_opt(sb, XATTR_USER) &&
61508- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61509+ if (!test_opt(sb, XATTR_USER))
61510 seq_puts(seq, ",nouser_xattr");
61511- }
61512 #endif
61513 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61514 if (test_opt(sb, POSIX_ACL))
61515@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61516 if (def_mount_opts & EXT3_DEFM_UID16)
61517 set_opt(sbi->s_mount_opt, NO_UID32);
61518 #ifdef CONFIG_EXT3_FS_XATTR
61519- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61520- set_opt(sbi->s_mount_opt, XATTR_USER);
61521+ /* always enable user xattrs */
61522+ set_opt(sbi->s_mount_opt, XATTR_USER);
61523 #endif
61524 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61525 if (def_mount_opts & EXT3_DEFM_ACL)
61526diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61527index c6874be..f8a6ae8 100644
61528--- a/fs/ext3/xattr.c
61529+++ b/fs/ext3/xattr.c
61530@@ -330,7 +330,7 @@ static int
61531 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61532 char *buffer, size_t buffer_size)
61533 {
61534- size_t rest = buffer_size;
61535+ size_t rest = buffer_size, total_size = 0;
61536
61537 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61538 const struct xattr_handler *handler =
61539@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61540 buffer += size;
61541 }
61542 rest -= size;
61543+ total_size += size;
61544 }
61545 }
61546- return buffer_size - rest;
61547+ return total_size;
61548 }
61549
61550 static int
61551diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61552index 83a6f49..d4e4d03 100644
61553--- a/fs/ext4/balloc.c
61554+++ b/fs/ext4/balloc.c
61555@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61556 /* Hm, nope. Are (enough) root reserved clusters available? */
61557 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61558 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61559- capable(CAP_SYS_RESOURCE) ||
61560- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61561+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61562+ capable_nolog(CAP_SYS_RESOURCE)) {
61563
61564 if (free_clusters >= (nclusters + dirty_clusters +
61565 resv_clusters))
61566diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61567index a75fba6..8235fca 100644
61568--- a/fs/ext4/ext4.h
61569+++ b/fs/ext4/ext4.h
61570@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61571 unsigned long s_mb_last_start;
61572
61573 /* stats for buddy allocator */
61574- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61575- atomic_t s_bal_success; /* we found long enough chunks */
61576- atomic_t s_bal_allocated; /* in blocks */
61577- atomic_t s_bal_ex_scanned; /* total extents scanned */
61578- atomic_t s_bal_goals; /* goal hits */
61579- atomic_t s_bal_breaks; /* too long searches */
61580- atomic_t s_bal_2orders; /* 2^order hits */
61581+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61582+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61583+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61584+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61585+ atomic_unchecked_t s_bal_goals; /* goal hits */
61586+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61587+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61588 spinlock_t s_bal_lock;
61589 unsigned long s_mb_buddies_generated;
61590 unsigned long long s_mb_generation_time;
61591- atomic_t s_mb_lost_chunks;
61592- atomic_t s_mb_preallocated;
61593- atomic_t s_mb_discarded;
61594+ atomic_unchecked_t s_mb_lost_chunks;
61595+ atomic_unchecked_t s_mb_preallocated;
61596+ atomic_unchecked_t s_mb_discarded;
61597 atomic_t s_lock_busy;
61598
61599 /* locality groups */
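
The converted s_bal_* and s_mb_* fields above are pure allocator telemetry, where wrap-around is harmless, so the patch opts them out of PAX_REFCOUNT overflow detection via atomic_unchecked_t. Roughly -- this is a sketch, not copied from the patch's own type headers -- the opt-out collapses back to the plain type when the feature is disabled:

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_add_unchecked(i, v)	atomic_add((i), (v))
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif
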
61600diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61601index 8d1e602..abf497b 100644
61602--- a/fs/ext4/mballoc.c
61603+++ b/fs/ext4/mballoc.c
61604@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61605 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61606
61607 if (EXT4_SB(sb)->s_mb_stats)
61608- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61609+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61610
61611 break;
61612 }
61613@@ -2211,7 +2211,7 @@ repeat:
61614 ac->ac_status = AC_STATUS_CONTINUE;
61615 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61616 cr = 3;
61617- atomic_inc(&sbi->s_mb_lost_chunks);
61618+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61619 goto repeat;
61620 }
61621 }
61622@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61623 if (sbi->s_mb_stats) {
61624 ext4_msg(sb, KERN_INFO,
61625 "mballoc: %u blocks %u reqs (%u success)",
61626- atomic_read(&sbi->s_bal_allocated),
61627- atomic_read(&sbi->s_bal_reqs),
61628- atomic_read(&sbi->s_bal_success));
61629+ atomic_read_unchecked(&sbi->s_bal_allocated),
61630+ atomic_read_unchecked(&sbi->s_bal_reqs),
61631+ atomic_read_unchecked(&sbi->s_bal_success));
61632 ext4_msg(sb, KERN_INFO,
61633 "mballoc: %u extents scanned, %u goal hits, "
61634 "%u 2^N hits, %u breaks, %u lost",
61635- atomic_read(&sbi->s_bal_ex_scanned),
61636- atomic_read(&sbi->s_bal_goals),
61637- atomic_read(&sbi->s_bal_2orders),
61638- atomic_read(&sbi->s_bal_breaks),
61639- atomic_read(&sbi->s_mb_lost_chunks));
61640+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61641+ atomic_read_unchecked(&sbi->s_bal_goals),
61642+ atomic_read_unchecked(&sbi->s_bal_2orders),
61643+ atomic_read_unchecked(&sbi->s_bal_breaks),
61644+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61645 ext4_msg(sb, KERN_INFO,
61646 "mballoc: %lu generated and it took %Lu",
61647 sbi->s_mb_buddies_generated,
61648 sbi->s_mb_generation_time);
61649 ext4_msg(sb, KERN_INFO,
61650 "mballoc: %u preallocated, %u discarded",
61651- atomic_read(&sbi->s_mb_preallocated),
61652- atomic_read(&sbi->s_mb_discarded));
61653+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61654+ atomic_read_unchecked(&sbi->s_mb_discarded));
61655 }
61656
61657 free_percpu(sbi->s_locality_groups);
61658@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61659 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61660
61661 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61662- atomic_inc(&sbi->s_bal_reqs);
61663- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61664+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61665+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61666 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61667- atomic_inc(&sbi->s_bal_success);
61668- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61669+ atomic_inc_unchecked(&sbi->s_bal_success);
61670+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61671 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61672 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61673- atomic_inc(&sbi->s_bal_goals);
61674+ atomic_inc_unchecked(&sbi->s_bal_goals);
61675 if (ac->ac_found > sbi->s_mb_max_to_scan)
61676- atomic_inc(&sbi->s_bal_breaks);
61677+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61678 }
61679
61680 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61681@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61682 trace_ext4_mb_new_inode_pa(ac, pa);
61683
61684 ext4_mb_use_inode_pa(ac, pa);
61685- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61686+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61687
61688 ei = EXT4_I(ac->ac_inode);
61689 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61690@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61691 trace_ext4_mb_new_group_pa(ac, pa);
61692
61693 ext4_mb_use_group_pa(ac, pa);
61694- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61695+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61696
61697 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61698 lg = ac->ac_lg;
61699@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61700 * from the bitmap and continue.
61701 */
61702 }
61703- atomic_add(free, &sbi->s_mb_discarded);
61704+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61705
61706 return err;
61707 }
61708@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61709 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61710 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61711 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61712- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61713+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61714 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61715
61716 return 0;
61717diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61718index 8313ca3..8a37d08 100644
61719--- a/fs/ext4/mmp.c
61720+++ b/fs/ext4/mmp.c
61721@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61722 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61723 const char *function, unsigned int line, const char *msg)
61724 {
61725- __ext4_warning(sb, function, line, msg);
61726+ __ext4_warning(sb, function, line, "%s", msg);
61727 __ext4_warning(sb, function, line,
61728 "MMP failure info: last update time: %llu, last update "
61729 "node: %s, last update device: %s\n",
61730diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
61731index 8a8ec62..1b02de5 100644
61732--- a/fs/ext4/resize.c
61733+++ b/fs/ext4/resize.c
61734@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61735
61736 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
61737 for (count2 = count; count > 0; count -= count2, block += count2) {
61738- ext4_fsblk_t start;
61739+ ext4_fsblk_t start, diff;
61740 struct buffer_head *bh;
61741 ext4_group_t group;
61742 int err;
61743@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61744 start = ext4_group_first_block_no(sb, group);
61745 group -= flex_gd->groups[0].group;
61746
61747- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
61748- if (count2 > count)
61749- count2 = count;
61750-
61751 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
61752 BUG_ON(flex_gd->count > 1);
61753 continue;
61754@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61755 err = ext4_journal_get_write_access(handle, bh);
61756 if (err)
61757 return err;
61758+
61759+ diff = block - start;
61760+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
61761+ if (count2 > count)
61762+ count2 = count;
61763+
61764 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
61765- block - start, count2);
61766- ext4_set_bits(bh->b_data, block - start, count2);
61767+ diff, count2);
61768+ ext4_set_bits(bh->b_data, diff, count2);
61769
61770 err = ext4_handle_dirty_metadata(handle, NULL, bh);
61771 if (unlikely(err))
61772diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61773index fc29b2c..6c8b255 100644
61774--- a/fs/ext4/super.c
61775+++ b/fs/ext4/super.c
61776@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61777 }
61778
61779 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61780-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61781+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61782 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61783
61784 #ifdef CONFIG_QUOTA
61785@@ -2440,7 +2440,7 @@ struct ext4_attr {
61786 int offset;
61787 int deprecated_val;
61788 } u;
61789-};
61790+} __do_const;
61791
61792 static int parse_strtoull(const char *buf,
61793 unsigned long long max, unsigned long long *value)
61794diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61795index 1e09fc7..0400dd4 100644
61796--- a/fs/ext4/xattr.c
61797+++ b/fs/ext4/xattr.c
61798@@ -399,7 +399,7 @@ static int
61799 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61800 char *buffer, size_t buffer_size)
61801 {
61802- size_t rest = buffer_size;
61803+ size_t rest = buffer_size, total_size = 0;
61804
61805 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61806 const struct xattr_handler *handler =
61807@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61808 buffer += size;
61809 }
61810 rest -= size;
61811+ total_size += size;
61812 }
61813 }
61814- return buffer_size - rest;
61815+ return total_size;
61816 }
61817
61818 static int
61819diff --git a/fs/fcntl.c b/fs/fcntl.c
61820index ee85cd4..9dd0d20 100644
61821--- a/fs/fcntl.c
61822+++ b/fs/fcntl.c
61823@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61824 int force)
61825 {
61826 security_file_set_fowner(filp);
61827+ if (gr_handle_chroot_fowner(pid, type))
61828+ return;
61829+ if (gr_check_protected_task_fowner(pid, type))
61830+ return;
61831 f_modown(filp, pid, type, force);
61832 }
61833 EXPORT_SYMBOL(__f_setown);
61834diff --git a/fs/fhandle.c b/fs/fhandle.c
61835index 999ff5c..2281df9 100644
61836--- a/fs/fhandle.c
61837+++ b/fs/fhandle.c
61838@@ -8,6 +8,7 @@
61839 #include <linux/fs_struct.h>
61840 #include <linux/fsnotify.h>
61841 #include <linux/personality.h>
61842+#include <linux/grsecurity.h>
61843 #include <asm/uaccess.h>
61844 #include "internal.h"
61845 #include "mount.h"
61846@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61847 } else
61848 retval = 0;
61849 /* copy the mount id */
61850- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61851- sizeof(*mnt_id)) ||
61852+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61853 copy_to_user(ufh, handle,
61854 sizeof(struct file_handle) + handle_bytes))
61855 retval = -EFAULT;
61856@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61857 * the directory. Ideally we would like CAP_DAC_SEARCH.
61858 * But we don't have that
61859 */
61860- if (!capable(CAP_DAC_READ_SEARCH)) {
61861+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61862 retval = -EPERM;
61863 goto out_err;
61864 }
61865@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61866 goto out_err;
61867 }
61868 /* copy the full handle */
61869- if (copy_from_user(handle, ufh,
61870- sizeof(struct file_handle) +
61871+ *handle = f_handle;
61872+ if (copy_from_user(&handle->f_handle,
61873+ &ufh->f_handle,
61874 f_handle.handle_bytes)) {
61875 retval = -EFAULT;
61876 goto out_handle;
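
The handle_to_path hunk above stops re-fetching the user-supplied header: f_handle was already copied in and validated, so the fixed code seeds *handle from that snapshot and copies only the variable-length payload. Re-reading the whole structure would let a racing thread change handle_bytes between the check and the use. A generic, hedged sketch of the double-fetch pattern (struct and names invented, kernel context assumed):

struct uhdr {
	unsigned int len;
	unsigned char data[];
};

static int fetch_obj(struct uhdr *dst, const struct uhdr __user *src,
		     unsigned int max)
{
	struct uhdr hdr;

	if (copy_from_user(&hdr, src, sizeof(hdr)))
		return -EFAULT;
	if (hdr.len > max)
		return -EINVAL;
	*dst = hdr;		/* keep the validated snapshot; never re-read it */
	if (copy_from_user(dst->data, src->data, hdr.len))
		return -EFAULT;
	return 0;
}
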
61877diff --git a/fs/file.c b/fs/file.c
61878index ee738ea..f6c1562 100644
61879--- a/fs/file.c
61880+++ b/fs/file.c
61881@@ -16,6 +16,7 @@
61882 #include <linux/slab.h>
61883 #include <linux/vmalloc.h>
61884 #include <linux/file.h>
61885+#include <linux/security.h>
61886 #include <linux/fdtable.h>
61887 #include <linux/bitops.h>
61888 #include <linux/interrupt.h>
61889@@ -139,7 +140,7 @@ out:
61890 * Return <0 error code on error; 1 on successful completion.
61891 * The files->file_lock should be held on entry, and will be held on exit.
61892 */
61893-static int expand_fdtable(struct files_struct *files, int nr)
61894+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61895 __releases(files->file_lock)
61896 __acquires(files->file_lock)
61897 {
61898@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61899 * expanded and execution may have blocked.
61900 * The files->file_lock should be held on entry, and will be held on exit.
61901 */
61902-static int expand_files(struct files_struct *files, int nr)
61903+static int expand_files(struct files_struct *files, unsigned int nr)
61904 {
61905 struct fdtable *fdt;
61906
61907@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61908 if (!file)
61909 return __close_fd(files, fd);
61910
61911+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61912 if (fd >= rlimit(RLIMIT_NOFILE))
61913 return -EBADF;
61914
61915@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61916 if (unlikely(oldfd == newfd))
61917 return -EINVAL;
61918
61919+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61920 if (newfd >= rlimit(RLIMIT_NOFILE))
61921 return -EBADF;
61922
61923@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
61924 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
61925 {
61926 int err;
61927+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
61928 if (from >= rlimit(RLIMIT_NOFILE))
61929 return -EINVAL;
61930 err = alloc_fd(from, flags);
61931diff --git a/fs/filesystems.c b/fs/filesystems.c
61932index 5797d45..7d7d79a 100644
61933--- a/fs/filesystems.c
61934+++ b/fs/filesystems.c
61935@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
61936 int len = dot ? dot - name : strlen(name);
61937
61938 fs = __get_fs_type(name, len);
61939+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61940+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
61941+#else
61942 if (!fs && (request_module("fs-%.*s", len, name) == 0))
61943+#endif
61944 fs = __get_fs_type(name, len);
61945
61946 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
61947diff --git a/fs/fs_struct.c b/fs/fs_struct.c
61948index 7dca743..2f2786d 100644
61949--- a/fs/fs_struct.c
61950+++ b/fs/fs_struct.c
61951@@ -4,6 +4,7 @@
61952 #include <linux/path.h>
61953 #include <linux/slab.h>
61954 #include <linux/fs_struct.h>
61955+#include <linux/grsecurity.h>
61956 #include "internal.h"
61957
61958 /*
61959@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
61960 struct path old_root;
61961
61962 path_get(path);
61963+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
61964 spin_lock(&fs->lock);
61965 write_seqcount_begin(&fs->seq);
61966 old_root = fs->root;
61967 fs->root = *path;
61968+ gr_set_chroot_entries(current, path);
61969 write_seqcount_end(&fs->seq);
61970 spin_unlock(&fs->lock);
61971- if (old_root.dentry)
61972+ if (old_root.dentry) {
61973+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
61974 path_put(&old_root);
61975+ }
61976 }
61977
61978 /*
61979@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61980 int hits = 0;
61981 spin_lock(&fs->lock);
61982 write_seqcount_begin(&fs->seq);
61983+ /* this root replacement is only done by pivot_root,
61984+ leave grsec's chroot tagging alone for this task
61985+ so that a pivoted root isn't treated as a chroot
61986+ */
61987 hits += replace_path(&fs->root, old_root, new_root);
61988 hits += replace_path(&fs->pwd, old_root, new_root);
61989 write_seqcount_end(&fs->seq);
61990@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61991
61992 void free_fs_struct(struct fs_struct *fs)
61993 {
61994+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
61995 path_put(&fs->root);
61996 path_put(&fs->pwd);
61997 kmem_cache_free(fs_cachep, fs);
61998@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
61999 task_lock(tsk);
62000 spin_lock(&fs->lock);
62001 tsk->fs = NULL;
62002- kill = !--fs->users;
62003+ gr_clear_chroot_entries(tsk);
62004+ kill = !atomic_dec_return(&fs->users);
62005 spin_unlock(&fs->lock);
62006 task_unlock(tsk);
62007 if (kill)
62008@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62009 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62010 /* We don't need to lock fs - think why ;-) */
62011 if (fs) {
62012- fs->users = 1;
62013+ atomic_set(&fs->users, 1);
62014 fs->in_exec = 0;
62015 spin_lock_init(&fs->lock);
62016 seqcount_init(&fs->seq);
62017@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62018 spin_lock(&old->lock);
62019 fs->root = old->root;
62020 path_get(&fs->root);
62021+ /* instead of calling gr_set_chroot_entries here,
62022+ we call it from every caller of this function
62023+ */
62024 fs->pwd = old->pwd;
62025 path_get(&fs->pwd);
62026 spin_unlock(&old->lock);
62027@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
62028
62029 task_lock(current);
62030 spin_lock(&fs->lock);
62031- kill = !--fs->users;
62032+ kill = !atomic_dec_return(&fs->users);
62033 current->fs = new_fs;
62034+ gr_set_chroot_entries(current, &new_fs->root);
62035 spin_unlock(&fs->lock);
62036 task_unlock(current);
62037
62038@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62039
62040 int current_umask(void)
62041 {
62042- return current->fs->umask;
62043+ return current->fs->umask | gr_acl_umask();
62044 }
62045 EXPORT_SYMBOL(current_umask);
62046
62047 /* to be mentioned only in INIT_TASK */
62048 struct fs_struct init_fs = {
62049- .users = 1,
62050+ .users = ATOMIC_INIT(1),
62051 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62052 .seq = SEQCNT_ZERO(init_fs.seq),
62053 .umask = 0022,
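
fs->users becomes a genuine atomic above, evidently so the grsecurity hooks can manipulate the count on paths that do not hold fs->lock; kill = !atomic_dec_return(&fs->users) keeps the drop-last-reference test a single indivisible step. The idiom, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int users = ATOMIC_VAR_INIT(1);

static void put_fs(void)
{
	/* kernel form: kill = !atomic_dec_return(&fs->users); */
	if (atomic_fetch_sub(&users, 1) == 1)
		printf("last user gone, free the struct\n");
}

int main(void)
{
	put_fs();
	return 0;
}
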
62054diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62055index 89acec7..a575262 100644
62056--- a/fs/fscache/cookie.c
62057+++ b/fs/fscache/cookie.c
62058@@ -19,7 +19,7 @@
62059
62060 struct kmem_cache *fscache_cookie_jar;
62061
62062-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62063+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62064
62065 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62066 static int fscache_alloc_object(struct fscache_cache *cache,
62067@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62068 parent ? (char *) parent->def->name : "<no-parent>",
62069 def->name, netfs_data, enable);
62070
62071- fscache_stat(&fscache_n_acquires);
62072+ fscache_stat_unchecked(&fscache_n_acquires);
62073
62074 /* if there's no parent cookie, then we don't create one here either */
62075 if (!parent) {
62076- fscache_stat(&fscache_n_acquires_null);
62077+ fscache_stat_unchecked(&fscache_n_acquires_null);
62078 _leave(" [no parent]");
62079 return NULL;
62080 }
62081@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62082 /* allocate and initialise a cookie */
62083 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62084 if (!cookie) {
62085- fscache_stat(&fscache_n_acquires_oom);
62086+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62087 _leave(" [ENOMEM]");
62088 return NULL;
62089 }
62090@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62091
62092 switch (cookie->def->type) {
62093 case FSCACHE_COOKIE_TYPE_INDEX:
62094- fscache_stat(&fscache_n_cookie_index);
62095+ fscache_stat_unchecked(&fscache_n_cookie_index);
62096 break;
62097 case FSCACHE_COOKIE_TYPE_DATAFILE:
62098- fscache_stat(&fscache_n_cookie_data);
62099+ fscache_stat_unchecked(&fscache_n_cookie_data);
62100 break;
62101 default:
62102- fscache_stat(&fscache_n_cookie_special);
62103+ fscache_stat_unchecked(&fscache_n_cookie_special);
62104 break;
62105 }
62106
62107@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62108 } else {
62109 atomic_dec(&parent->n_children);
62110 __fscache_cookie_put(cookie);
62111- fscache_stat(&fscache_n_acquires_nobufs);
62112+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62113 _leave(" = NULL");
62114 return NULL;
62115 }
62116@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62117 }
62118 }
62119
62120- fscache_stat(&fscache_n_acquires_ok);
62121+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62122 _leave(" = %p", cookie);
62123 return cookie;
62124 }
62125@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62126 cache = fscache_select_cache_for_object(cookie->parent);
62127 if (!cache) {
62128 up_read(&fscache_addremove_sem);
62129- fscache_stat(&fscache_n_acquires_no_cache);
62130+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62131 _leave(" = -ENOMEDIUM [no cache]");
62132 return -ENOMEDIUM;
62133 }
62134@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62135 object = cache->ops->alloc_object(cache, cookie);
62136 fscache_stat_d(&fscache_n_cop_alloc_object);
62137 if (IS_ERR(object)) {
62138- fscache_stat(&fscache_n_object_no_alloc);
62139+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62140 ret = PTR_ERR(object);
62141 goto error;
62142 }
62143
62144- fscache_stat(&fscache_n_object_alloc);
62145+ fscache_stat_unchecked(&fscache_n_object_alloc);
62146
62147- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62148+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62149
62150 _debug("ALLOC OBJ%x: %s {%lx}",
62151 object->debug_id, cookie->def->name, object->events);
62152@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62153
62154 _enter("{%s}", cookie->def->name);
62155
62156- fscache_stat(&fscache_n_invalidates);
62157+ fscache_stat_unchecked(&fscache_n_invalidates);
62158
62159 /* Only permit invalidation of data files. Invalidating an index will
62160 * require the caller to release all its attachments to the tree rooted
62161@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62162 {
62163 struct fscache_object *object;
62164
62165- fscache_stat(&fscache_n_updates);
62166+ fscache_stat_unchecked(&fscache_n_updates);
62167
62168 if (!cookie) {
62169- fscache_stat(&fscache_n_updates_null);
62170+ fscache_stat_unchecked(&fscache_n_updates_null);
62171 _leave(" [no cookie]");
62172 return;
62173 }
62174@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62175 */
62176 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62177 {
62178- fscache_stat(&fscache_n_relinquishes);
62179+ fscache_stat_unchecked(&fscache_n_relinquishes);
62180 if (retire)
62181- fscache_stat(&fscache_n_relinquishes_retire);
62182+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62183
62184 if (!cookie) {
62185- fscache_stat(&fscache_n_relinquishes_null);
62186+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62187 _leave(" [no cookie]");
62188 return;
62189 }
62190@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62191 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62192 goto inconsistent;
62193
62194- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62195+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62196
62197 __fscache_use_cookie(cookie);
62198 if (fscache_submit_op(object, op) < 0)
62199diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62200index 7872a62..d91b19f 100644
62201--- a/fs/fscache/internal.h
62202+++ b/fs/fscache/internal.h
62203@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62204 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62205 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62206 struct fscache_operation *,
62207- atomic_t *,
62208- atomic_t *,
62209+ atomic_unchecked_t *,
62210+ atomic_unchecked_t *,
62211 void (*)(struct fscache_operation *));
62212 extern void fscache_invalidate_writes(struct fscache_cookie *);
62213
62214@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62215 * stats.c
62216 */
62217 #ifdef CONFIG_FSCACHE_STATS
62218-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62219-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62220+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62221+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62222
62223-extern atomic_t fscache_n_op_pend;
62224-extern atomic_t fscache_n_op_run;
62225-extern atomic_t fscache_n_op_enqueue;
62226-extern atomic_t fscache_n_op_deferred_release;
62227-extern atomic_t fscache_n_op_release;
62228-extern atomic_t fscache_n_op_gc;
62229-extern atomic_t fscache_n_op_cancelled;
62230-extern atomic_t fscache_n_op_rejected;
62231+extern atomic_unchecked_t fscache_n_op_pend;
62232+extern atomic_unchecked_t fscache_n_op_run;
62233+extern atomic_unchecked_t fscache_n_op_enqueue;
62234+extern atomic_unchecked_t fscache_n_op_deferred_release;
62235+extern atomic_unchecked_t fscache_n_op_release;
62236+extern atomic_unchecked_t fscache_n_op_gc;
62237+extern atomic_unchecked_t fscache_n_op_cancelled;
62238+extern atomic_unchecked_t fscache_n_op_rejected;
62239
62240-extern atomic_t fscache_n_attr_changed;
62241-extern atomic_t fscache_n_attr_changed_ok;
62242-extern atomic_t fscache_n_attr_changed_nobufs;
62243-extern atomic_t fscache_n_attr_changed_nomem;
62244-extern atomic_t fscache_n_attr_changed_calls;
62245+extern atomic_unchecked_t fscache_n_attr_changed;
62246+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62247+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62248+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62249+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62250
62251-extern atomic_t fscache_n_allocs;
62252-extern atomic_t fscache_n_allocs_ok;
62253-extern atomic_t fscache_n_allocs_wait;
62254-extern atomic_t fscache_n_allocs_nobufs;
62255-extern atomic_t fscache_n_allocs_intr;
62256-extern atomic_t fscache_n_allocs_object_dead;
62257-extern atomic_t fscache_n_alloc_ops;
62258-extern atomic_t fscache_n_alloc_op_waits;
62259+extern atomic_unchecked_t fscache_n_allocs;
62260+extern atomic_unchecked_t fscache_n_allocs_ok;
62261+extern atomic_unchecked_t fscache_n_allocs_wait;
62262+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62263+extern atomic_unchecked_t fscache_n_allocs_intr;
62264+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62265+extern atomic_unchecked_t fscache_n_alloc_ops;
62266+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62267
62268-extern atomic_t fscache_n_retrievals;
62269-extern atomic_t fscache_n_retrievals_ok;
62270-extern atomic_t fscache_n_retrievals_wait;
62271-extern atomic_t fscache_n_retrievals_nodata;
62272-extern atomic_t fscache_n_retrievals_nobufs;
62273-extern atomic_t fscache_n_retrievals_intr;
62274-extern atomic_t fscache_n_retrievals_nomem;
62275-extern atomic_t fscache_n_retrievals_object_dead;
62276-extern atomic_t fscache_n_retrieval_ops;
62277-extern atomic_t fscache_n_retrieval_op_waits;
62278+extern atomic_unchecked_t fscache_n_retrievals;
62279+extern atomic_unchecked_t fscache_n_retrievals_ok;
62280+extern atomic_unchecked_t fscache_n_retrievals_wait;
62281+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62282+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62283+extern atomic_unchecked_t fscache_n_retrievals_intr;
62284+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62285+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62286+extern atomic_unchecked_t fscache_n_retrieval_ops;
62287+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62288
62289-extern atomic_t fscache_n_stores;
62290-extern atomic_t fscache_n_stores_ok;
62291-extern atomic_t fscache_n_stores_again;
62292-extern atomic_t fscache_n_stores_nobufs;
62293-extern atomic_t fscache_n_stores_oom;
62294-extern atomic_t fscache_n_store_ops;
62295-extern atomic_t fscache_n_store_calls;
62296-extern atomic_t fscache_n_store_pages;
62297-extern atomic_t fscache_n_store_radix_deletes;
62298-extern atomic_t fscache_n_store_pages_over_limit;
62299+extern atomic_unchecked_t fscache_n_stores;
62300+extern atomic_unchecked_t fscache_n_stores_ok;
62301+extern atomic_unchecked_t fscache_n_stores_again;
62302+extern atomic_unchecked_t fscache_n_stores_nobufs;
62303+extern atomic_unchecked_t fscache_n_stores_oom;
62304+extern atomic_unchecked_t fscache_n_store_ops;
62305+extern atomic_unchecked_t fscache_n_store_calls;
62306+extern atomic_unchecked_t fscache_n_store_pages;
62307+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62308+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62309
62310-extern atomic_t fscache_n_store_vmscan_not_storing;
62311-extern atomic_t fscache_n_store_vmscan_gone;
62312-extern atomic_t fscache_n_store_vmscan_busy;
62313-extern atomic_t fscache_n_store_vmscan_cancelled;
62314-extern atomic_t fscache_n_store_vmscan_wait;
62315+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62316+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62317+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62318+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62319+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62320
62321-extern atomic_t fscache_n_marks;
62322-extern atomic_t fscache_n_uncaches;
62323+extern atomic_unchecked_t fscache_n_marks;
62324+extern atomic_unchecked_t fscache_n_uncaches;
62325
62326-extern atomic_t fscache_n_acquires;
62327-extern atomic_t fscache_n_acquires_null;
62328-extern atomic_t fscache_n_acquires_no_cache;
62329-extern atomic_t fscache_n_acquires_ok;
62330-extern atomic_t fscache_n_acquires_nobufs;
62331-extern atomic_t fscache_n_acquires_oom;
62332+extern atomic_unchecked_t fscache_n_acquires;
62333+extern atomic_unchecked_t fscache_n_acquires_null;
62334+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62335+extern atomic_unchecked_t fscache_n_acquires_ok;
62336+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62337+extern atomic_unchecked_t fscache_n_acquires_oom;
62338
62339-extern atomic_t fscache_n_invalidates;
62340-extern atomic_t fscache_n_invalidates_run;
62341+extern atomic_unchecked_t fscache_n_invalidates;
62342+extern atomic_unchecked_t fscache_n_invalidates_run;
62343
62344-extern atomic_t fscache_n_updates;
62345-extern atomic_t fscache_n_updates_null;
62346-extern atomic_t fscache_n_updates_run;
62347+extern atomic_unchecked_t fscache_n_updates;
62348+extern atomic_unchecked_t fscache_n_updates_null;
62349+extern atomic_unchecked_t fscache_n_updates_run;
62350
62351-extern atomic_t fscache_n_relinquishes;
62352-extern atomic_t fscache_n_relinquishes_null;
62353-extern atomic_t fscache_n_relinquishes_waitcrt;
62354-extern atomic_t fscache_n_relinquishes_retire;
62355+extern atomic_unchecked_t fscache_n_relinquishes;
62356+extern atomic_unchecked_t fscache_n_relinquishes_null;
62357+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62358+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62359
62360-extern atomic_t fscache_n_cookie_index;
62361-extern atomic_t fscache_n_cookie_data;
62362-extern atomic_t fscache_n_cookie_special;
62363+extern atomic_unchecked_t fscache_n_cookie_index;
62364+extern atomic_unchecked_t fscache_n_cookie_data;
62365+extern atomic_unchecked_t fscache_n_cookie_special;
62366
62367-extern atomic_t fscache_n_object_alloc;
62368-extern atomic_t fscache_n_object_no_alloc;
62369-extern atomic_t fscache_n_object_lookups;
62370-extern atomic_t fscache_n_object_lookups_negative;
62371-extern atomic_t fscache_n_object_lookups_positive;
62372-extern atomic_t fscache_n_object_lookups_timed_out;
62373-extern atomic_t fscache_n_object_created;
62374-extern atomic_t fscache_n_object_avail;
62375-extern atomic_t fscache_n_object_dead;
62376+extern atomic_unchecked_t fscache_n_object_alloc;
62377+extern atomic_unchecked_t fscache_n_object_no_alloc;
62378+extern atomic_unchecked_t fscache_n_object_lookups;
62379+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62380+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62381+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62382+extern atomic_unchecked_t fscache_n_object_created;
62383+extern atomic_unchecked_t fscache_n_object_avail;
62384+extern atomic_unchecked_t fscache_n_object_dead;
62385
62386-extern atomic_t fscache_n_checkaux_none;
62387-extern atomic_t fscache_n_checkaux_okay;
62388-extern atomic_t fscache_n_checkaux_update;
62389-extern atomic_t fscache_n_checkaux_obsolete;
62390+extern atomic_unchecked_t fscache_n_checkaux_none;
62391+extern atomic_unchecked_t fscache_n_checkaux_okay;
62392+extern atomic_unchecked_t fscache_n_checkaux_update;
62393+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62394
62395 extern atomic_t fscache_n_cop_alloc_object;
62396 extern atomic_t fscache_n_cop_lookup_object;
62397@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62398 atomic_inc(stat);
62399 }
62400
62401+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62402+{
62403+ atomic_inc_unchecked(stat);
62404+}
62405+
62406 static inline void fscache_stat_d(atomic_t *stat)
62407 {
62408 atomic_dec(stat);
62409@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62410
62411 #define __fscache_stat(stat) (NULL)
62412 #define fscache_stat(stat) do {} while (0)
62413+#define fscache_stat_unchecked(stat) do {} while (0)
62414 #define fscache_stat_d(stat) do {} while (0)
62415 #endif
62416
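
One detail worth noting in the stats-off branch above: the new fscache_stat_unchecked() stub expands to do {} while (0), not to nothing, so the macro stays a single statement and call sites parse identically in both configurations. The classic dangling-else case it guards against:

#include <stdio.h>

#define stat_inc(stat) do {} while (0)	/* stats compiled out */

static void other_work(void)
{
	puts("work");
}

static void f(int cond)
{
	if (cond)
		stat_inc(&cond);	/* still one statement, so the else binds correctly */
	else
		other_work();
}

int main(void)
{
	f(0);
	return 0;
}
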
62417diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62418index da032da..0076ce7 100644
62419--- a/fs/fscache/object.c
62420+++ b/fs/fscache/object.c
62421@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62422 _debug("LOOKUP \"%s\" in \"%s\"",
62423 cookie->def->name, object->cache->tag->name);
62424
62425- fscache_stat(&fscache_n_object_lookups);
62426+ fscache_stat_unchecked(&fscache_n_object_lookups);
62427 fscache_stat(&fscache_n_cop_lookup_object);
62428 ret = object->cache->ops->lookup_object(object);
62429 fscache_stat_d(&fscache_n_cop_lookup_object);
62430@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62431 if (ret == -ETIMEDOUT) {
62432 /* probably stuck behind another object, so move this one to
62433 * the back of the queue */
62434- fscache_stat(&fscache_n_object_lookups_timed_out);
62435+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62436 _leave(" [timeout]");
62437 return NO_TRANSIT;
62438 }
62439@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62440 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62441
62442 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62443- fscache_stat(&fscache_n_object_lookups_negative);
62444+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62445
62446 /* Allow write requests to begin stacking up and read requests to begin
62447 * returning ENODATA.
62448@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62449 /* if we were still looking up, then we must have a positive lookup
62450 * result, in which case there may be data available */
62451 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62452- fscache_stat(&fscache_n_object_lookups_positive);
62453+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62454
62455 /* We do (presumably) have data */
62456 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62457@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62458 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62459 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62460 } else {
62461- fscache_stat(&fscache_n_object_created);
62462+ fscache_stat_unchecked(&fscache_n_object_created);
62463 }
62464
62465 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62466@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62467 fscache_stat_d(&fscache_n_cop_lookup_complete);
62468
62469 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62470- fscache_stat(&fscache_n_object_avail);
62471+ fscache_stat_unchecked(&fscache_n_object_avail);
62472
62473 _leave("");
62474 return transit_to(JUMPSTART_DEPS);
62475@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62476
62477 /* this just shifts the object release to the work processor */
62478 fscache_put_object(object);
62479- fscache_stat(&fscache_n_object_dead);
62480+ fscache_stat_unchecked(&fscache_n_object_dead);
62481
62482 _leave("");
62483 return transit_to(OBJECT_DEAD);
62484@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62485 enum fscache_checkaux result;
62486
62487 if (!object->cookie->def->check_aux) {
62488- fscache_stat(&fscache_n_checkaux_none);
62489+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62490 return FSCACHE_CHECKAUX_OKAY;
62491 }
62492
62493@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62494 switch (result) {
62495 /* entry okay as is */
62496 case FSCACHE_CHECKAUX_OKAY:
62497- fscache_stat(&fscache_n_checkaux_okay);
62498+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62499 break;
62500
62501 /* entry requires update */
62502 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62503- fscache_stat(&fscache_n_checkaux_update);
62504+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62505 break;
62506
62507 /* entry requires deletion */
62508 case FSCACHE_CHECKAUX_OBSOLETE:
62509- fscache_stat(&fscache_n_checkaux_obsolete);
62510+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62511 break;
62512
62513 default:
62514@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62515 {
62516 const struct fscache_state *s;
62517
62518- fscache_stat(&fscache_n_invalidates_run);
62519+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62520 fscache_stat(&fscache_n_cop_invalidate_object);
62521 s = _fscache_invalidate_object(object, event);
62522 fscache_stat_d(&fscache_n_cop_invalidate_object);
62523@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62524 {
62525 _enter("{OBJ%x},%d", object->debug_id, event);
62526
62527- fscache_stat(&fscache_n_updates_run);
62528+ fscache_stat_unchecked(&fscache_n_updates_run);
62529 fscache_stat(&fscache_n_cop_update_object);
62530 object->cache->ops->update_object(object);
62531 fscache_stat_d(&fscache_n_cop_update_object);
62532diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62533index e7b87a0..a85d47a 100644
62534--- a/fs/fscache/operation.c
62535+++ b/fs/fscache/operation.c
62536@@ -17,7 +17,7 @@
62537 #include <linux/slab.h>
62538 #include "internal.h"
62539
62540-atomic_t fscache_op_debug_id;
62541+atomic_unchecked_t fscache_op_debug_id;
62542 EXPORT_SYMBOL(fscache_op_debug_id);
62543
62544 /**
62545@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62546 ASSERTCMP(atomic_read(&op->usage), >, 0);
62547 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62548
62549- fscache_stat(&fscache_n_op_enqueue);
62550+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62551 switch (op->flags & FSCACHE_OP_TYPE) {
62552 case FSCACHE_OP_ASYNC:
62553 _debug("queue async");
62554@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62555 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62556 if (op->processor)
62557 fscache_enqueue_operation(op);
62558- fscache_stat(&fscache_n_op_run);
62559+ fscache_stat_unchecked(&fscache_n_op_run);
62560 }
62561
62562 /*
62563@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62564 if (object->n_in_progress > 0) {
62565 atomic_inc(&op->usage);
62566 list_add_tail(&op->pend_link, &object->pending_ops);
62567- fscache_stat(&fscache_n_op_pend);
62568+ fscache_stat_unchecked(&fscache_n_op_pend);
62569 } else if (!list_empty(&object->pending_ops)) {
62570 atomic_inc(&op->usage);
62571 list_add_tail(&op->pend_link, &object->pending_ops);
62572- fscache_stat(&fscache_n_op_pend);
62573+ fscache_stat_unchecked(&fscache_n_op_pend);
62574 fscache_start_operations(object);
62575 } else {
62576 ASSERTCMP(object->n_in_progress, ==, 0);
62577@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62578 object->n_exclusive++; /* reads and writes must wait */
62579 atomic_inc(&op->usage);
62580 list_add_tail(&op->pend_link, &object->pending_ops);
62581- fscache_stat(&fscache_n_op_pend);
62582+ fscache_stat_unchecked(&fscache_n_op_pend);
62583 ret = 0;
62584 } else {
62585 /* If we're in any other state, there must have been an I/O
62586@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62587 if (object->n_exclusive > 0) {
62588 atomic_inc(&op->usage);
62589 list_add_tail(&op->pend_link, &object->pending_ops);
62590- fscache_stat(&fscache_n_op_pend);
62591+ fscache_stat_unchecked(&fscache_n_op_pend);
62592 } else if (!list_empty(&object->pending_ops)) {
62593 atomic_inc(&op->usage);
62594 list_add_tail(&op->pend_link, &object->pending_ops);
62595- fscache_stat(&fscache_n_op_pend);
62596+ fscache_stat_unchecked(&fscache_n_op_pend);
62597 fscache_start_operations(object);
62598 } else {
62599 ASSERTCMP(object->n_exclusive, ==, 0);
62600@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62601 object->n_ops++;
62602 atomic_inc(&op->usage);
62603 list_add_tail(&op->pend_link, &object->pending_ops);
62604- fscache_stat(&fscache_n_op_pend);
62605+ fscache_stat_unchecked(&fscache_n_op_pend);
62606 ret = 0;
62607 } else if (fscache_object_is_dying(object)) {
62608- fscache_stat(&fscache_n_op_rejected);
62609+ fscache_stat_unchecked(&fscache_n_op_rejected);
62610 op->state = FSCACHE_OP_ST_CANCELLED;
62611 ret = -ENOBUFS;
62612 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62613@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62614 ret = -EBUSY;
62615 if (op->state == FSCACHE_OP_ST_PENDING) {
62616 ASSERT(!list_empty(&op->pend_link));
62617- fscache_stat(&fscache_n_op_cancelled);
62618+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62619 list_del_init(&op->pend_link);
62620 if (do_cancel)
62621 do_cancel(op);
62622@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62623 while (!list_empty(&object->pending_ops)) {
62624 op = list_entry(object->pending_ops.next,
62625 struct fscache_operation, pend_link);
62626- fscache_stat(&fscache_n_op_cancelled);
62627+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62628 list_del_init(&op->pend_link);
62629
62630 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62631@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62632 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62633 op->state = FSCACHE_OP_ST_DEAD;
62634
62635- fscache_stat(&fscache_n_op_release);
62636+ fscache_stat_unchecked(&fscache_n_op_release);
62637
62638 if (op->release) {
62639 op->release(op);
62640@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62641 * lock, and defer it otherwise */
62642 if (!spin_trylock(&object->lock)) {
62643 _debug("defer put");
62644- fscache_stat(&fscache_n_op_deferred_release);
62645+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62646
62647 cache = object->cache;
62648 spin_lock(&cache->op_gc_list_lock);
62649@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62650
62651 _debug("GC DEFERRED REL OBJ%x OP%x",
62652 object->debug_id, op->debug_id);
62653- fscache_stat(&fscache_n_op_gc);
62654+ fscache_stat_unchecked(&fscache_n_op_gc);
62655
62656 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62657 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62658diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62659index de33b3f..8be4d29 100644
62660--- a/fs/fscache/page.c
62661+++ b/fs/fscache/page.c
62662@@ -74,7 +74,7 @@ try_again:
62663 val = radix_tree_lookup(&cookie->stores, page->index);
62664 if (!val) {
62665 rcu_read_unlock();
62666- fscache_stat(&fscache_n_store_vmscan_not_storing);
62667+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62668 __fscache_uncache_page(cookie, page);
62669 return true;
62670 }
62671@@ -104,11 +104,11 @@ try_again:
62672 spin_unlock(&cookie->stores_lock);
62673
62674 if (xpage) {
62675- fscache_stat(&fscache_n_store_vmscan_cancelled);
62676- fscache_stat(&fscache_n_store_radix_deletes);
62677+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62678+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62679 ASSERTCMP(xpage, ==, page);
62680 } else {
62681- fscache_stat(&fscache_n_store_vmscan_gone);
62682+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62683 }
62684
62685 wake_up_bit(&cookie->flags, 0);
62686@@ -123,11 +123,11 @@ page_busy:
62687 * sleeping on memory allocation, so we may need to impose a timeout
62688 * too. */
62689 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62690- fscache_stat(&fscache_n_store_vmscan_busy);
62691+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62692 return false;
62693 }
62694
62695- fscache_stat(&fscache_n_store_vmscan_wait);
62696+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62697 if (!release_page_wait_timeout(cookie, page))
62698 _debug("fscache writeout timeout page: %p{%lx}",
62699 page, page->index);
62700@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62701 FSCACHE_COOKIE_STORING_TAG);
62702 if (!radix_tree_tag_get(&cookie->stores, page->index,
62703 FSCACHE_COOKIE_PENDING_TAG)) {
62704- fscache_stat(&fscache_n_store_radix_deletes);
62705+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62706 xpage = radix_tree_delete(&cookie->stores, page->index);
62707 }
62708 spin_unlock(&cookie->stores_lock);
62709@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62710
62711 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62712
62713- fscache_stat(&fscache_n_attr_changed_calls);
62714+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62715
62716 if (fscache_object_is_active(object)) {
62717 fscache_stat(&fscache_n_cop_attr_changed);
62718@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62719
62720 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62721
62722- fscache_stat(&fscache_n_attr_changed);
62723+ fscache_stat_unchecked(&fscache_n_attr_changed);
62724
62725 op = kzalloc(sizeof(*op), GFP_KERNEL);
62726 if (!op) {
62727- fscache_stat(&fscache_n_attr_changed_nomem);
62728+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62729 _leave(" = -ENOMEM");
62730 return -ENOMEM;
62731 }
62732@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62733 if (fscache_submit_exclusive_op(object, op) < 0)
62734 goto nobufs_dec;
62735 spin_unlock(&cookie->lock);
62736- fscache_stat(&fscache_n_attr_changed_ok);
62737+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62738 fscache_put_operation(op);
62739 _leave(" = 0");
62740 return 0;
62741@@ -242,7 +242,7 @@ nobufs:
62742 kfree(op);
62743 if (wake_cookie)
62744 __fscache_wake_unused_cookie(cookie);
62745- fscache_stat(&fscache_n_attr_changed_nobufs);
62746+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62747 _leave(" = %d", -ENOBUFS);
62748 return -ENOBUFS;
62749 }
62750@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62751 /* allocate a retrieval operation and attempt to submit it */
62752 op = kzalloc(sizeof(*op), GFP_NOIO);
62753 if (!op) {
62754- fscache_stat(&fscache_n_retrievals_nomem);
62755+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62756 return NULL;
62757 }
62758
62759@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62760 return 0;
62761 }
62762
62763- fscache_stat(&fscache_n_retrievals_wait);
62764+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62765
62766 jif = jiffies;
62767 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62768 TASK_INTERRUPTIBLE) != 0) {
62769- fscache_stat(&fscache_n_retrievals_intr);
62770+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62771 _leave(" = -ERESTARTSYS");
62772 return -ERESTARTSYS;
62773 }
62774@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62775 */
62776 int fscache_wait_for_operation_activation(struct fscache_object *object,
62777 struct fscache_operation *op,
62778- atomic_t *stat_op_waits,
62779- atomic_t *stat_object_dead,
62780+ atomic_unchecked_t *stat_op_waits,
62781+ atomic_unchecked_t *stat_object_dead,
62782 void (*do_cancel)(struct fscache_operation *))
62783 {
62784 int ret;
62785@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62786
62787 _debug(">>> WT");
62788 if (stat_op_waits)
62789- fscache_stat(stat_op_waits);
62790+ fscache_stat_unchecked(stat_op_waits);
62791 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62792 TASK_INTERRUPTIBLE) != 0) {
62793 ret = fscache_cancel_op(op, do_cancel);
62794@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62795 check_if_dead:
62796 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62797 if (stat_object_dead)
62798- fscache_stat(stat_object_dead);
62799+ fscache_stat_unchecked(stat_object_dead);
62800 _leave(" = -ENOBUFS [cancelled]");
62801 return -ENOBUFS;
62802 }
62803@@ -381,7 +381,7 @@ check_if_dead:
62804 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62805 fscache_cancel_op(op, do_cancel);
62806 if (stat_object_dead)
62807- fscache_stat(stat_object_dead);
62808+ fscache_stat_unchecked(stat_object_dead);
62809 return -ENOBUFS;
62810 }
62811 return 0;
62812@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62813
62814 _enter("%p,%p,,,", cookie, page);
62815
62816- fscache_stat(&fscache_n_retrievals);
62817+ fscache_stat_unchecked(&fscache_n_retrievals);
62818
62819 if (hlist_empty(&cookie->backing_objects))
62820 goto nobufs;
62821@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62822 goto nobufs_unlock_dec;
62823 spin_unlock(&cookie->lock);
62824
62825- fscache_stat(&fscache_n_retrieval_ops);
62826+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62827
62828 /* pin the netfs read context in case we need to do the actual netfs
62829 * read because we've encountered a cache read failure */
62830@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62831
62832 error:
62833 if (ret == -ENOMEM)
62834- fscache_stat(&fscache_n_retrievals_nomem);
62835+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62836 else if (ret == -ERESTARTSYS)
62837- fscache_stat(&fscache_n_retrievals_intr);
62838+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62839 else if (ret == -ENODATA)
62840- fscache_stat(&fscache_n_retrievals_nodata);
62841+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62842 else if (ret < 0)
62843- fscache_stat(&fscache_n_retrievals_nobufs);
62844+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62845 else
62846- fscache_stat(&fscache_n_retrievals_ok);
62847+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62848
62849 fscache_put_retrieval(op);
62850 _leave(" = %d", ret);
62851@@ -505,7 +505,7 @@ nobufs_unlock:
62852 __fscache_wake_unused_cookie(cookie);
62853 kfree(op);
62854 nobufs:
62855- fscache_stat(&fscache_n_retrievals_nobufs);
62856+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62857 _leave(" = -ENOBUFS");
62858 return -ENOBUFS;
62859 }
62860@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62861
62862 _enter("%p,,%d,,,", cookie, *nr_pages);
62863
62864- fscache_stat(&fscache_n_retrievals);
62865+ fscache_stat_unchecked(&fscache_n_retrievals);
62866
62867 if (hlist_empty(&cookie->backing_objects))
62868 goto nobufs;
62869@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62870 goto nobufs_unlock_dec;
62871 spin_unlock(&cookie->lock);
62872
62873- fscache_stat(&fscache_n_retrieval_ops);
62874+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62875
62876 /* pin the netfs read context in case we need to do the actual netfs
62877 * read because we've encountered a cache read failure */
62878@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62879
62880 error:
62881 if (ret == -ENOMEM)
62882- fscache_stat(&fscache_n_retrievals_nomem);
62883+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62884 else if (ret == -ERESTARTSYS)
62885- fscache_stat(&fscache_n_retrievals_intr);
62886+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62887 else if (ret == -ENODATA)
62888- fscache_stat(&fscache_n_retrievals_nodata);
62889+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62890 else if (ret < 0)
62891- fscache_stat(&fscache_n_retrievals_nobufs);
62892+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62893 else
62894- fscache_stat(&fscache_n_retrievals_ok);
62895+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62896
62897 fscache_put_retrieval(op);
62898 _leave(" = %d", ret);
62899@@ -636,7 +636,7 @@ nobufs_unlock:
62900 if (wake_cookie)
62901 __fscache_wake_unused_cookie(cookie);
62902 nobufs:
62903- fscache_stat(&fscache_n_retrievals_nobufs);
62904+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62905 _leave(" = -ENOBUFS");
62906 return -ENOBUFS;
62907 }
62908@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62909
62910 _enter("%p,%p,,,", cookie, page);
62911
62912- fscache_stat(&fscache_n_allocs);
62913+ fscache_stat_unchecked(&fscache_n_allocs);
62914
62915 if (hlist_empty(&cookie->backing_objects))
62916 goto nobufs;
62917@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62918 goto nobufs_unlock_dec;
62919 spin_unlock(&cookie->lock);
62920
62921- fscache_stat(&fscache_n_alloc_ops);
62922+ fscache_stat_unchecked(&fscache_n_alloc_ops);
62923
62924 ret = fscache_wait_for_operation_activation(
62925 object, &op->op,
62926@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62927
62928 error:
62929 if (ret == -ERESTARTSYS)
62930- fscache_stat(&fscache_n_allocs_intr);
62931+ fscache_stat_unchecked(&fscache_n_allocs_intr);
62932 else if (ret < 0)
62933- fscache_stat(&fscache_n_allocs_nobufs);
62934+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62935 else
62936- fscache_stat(&fscache_n_allocs_ok);
62937+ fscache_stat_unchecked(&fscache_n_allocs_ok);
62938
62939 fscache_put_retrieval(op);
62940 _leave(" = %d", ret);
62941@@ -730,7 +730,7 @@ nobufs_unlock:
62942 if (wake_cookie)
62943 __fscache_wake_unused_cookie(cookie);
62944 nobufs:
62945- fscache_stat(&fscache_n_allocs_nobufs);
62946+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62947 _leave(" = -ENOBUFS");
62948 return -ENOBUFS;
62949 }
62950@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62951
62952 spin_lock(&cookie->stores_lock);
62953
62954- fscache_stat(&fscache_n_store_calls);
62955+ fscache_stat_unchecked(&fscache_n_store_calls);
62956
62957 /* find a page to store */
62958 page = NULL;
62959@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62960 page = results[0];
62961 _debug("gang %d [%lx]", n, page->index);
62962 if (page->index > op->store_limit) {
62963- fscache_stat(&fscache_n_store_pages_over_limit);
62964+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
62965 goto superseded;
62966 }
62967
62968@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62969 spin_unlock(&cookie->stores_lock);
62970 spin_unlock(&object->lock);
62971
62972- fscache_stat(&fscache_n_store_pages);
62973+ fscache_stat_unchecked(&fscache_n_store_pages);
62974 fscache_stat(&fscache_n_cop_write_page);
62975 ret = object->cache->ops->write_page(op, page);
62976 fscache_stat_d(&fscache_n_cop_write_page);
62977@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62978 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62979 ASSERT(PageFsCache(page));
62980
62981- fscache_stat(&fscache_n_stores);
62982+ fscache_stat_unchecked(&fscache_n_stores);
62983
62984 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
62985 _leave(" = -ENOBUFS [invalidating]");
62986@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62987 spin_unlock(&cookie->stores_lock);
62988 spin_unlock(&object->lock);
62989
62990- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
62991+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62992 op->store_limit = object->store_limit;
62993
62994 __fscache_use_cookie(cookie);
62995@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62996
62997 spin_unlock(&cookie->lock);
62998 radix_tree_preload_end();
62999- fscache_stat(&fscache_n_store_ops);
63000- fscache_stat(&fscache_n_stores_ok);
63001+ fscache_stat_unchecked(&fscache_n_store_ops);
63002+ fscache_stat_unchecked(&fscache_n_stores_ok);
63003
63004 /* the work queue now carries its own ref on the object */
63005 fscache_put_operation(&op->op);
63006@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63007 return 0;
63008
63009 already_queued:
63010- fscache_stat(&fscache_n_stores_again);
63011+ fscache_stat_unchecked(&fscache_n_stores_again);
63012 already_pending:
63013 spin_unlock(&cookie->stores_lock);
63014 spin_unlock(&object->lock);
63015 spin_unlock(&cookie->lock);
63016 radix_tree_preload_end();
63017 kfree(op);
63018- fscache_stat(&fscache_n_stores_ok);
63019+ fscache_stat_unchecked(&fscache_n_stores_ok);
63020 _leave(" = 0");
63021 return 0;
63022
63023@@ -1039,14 +1039,14 @@ nobufs:
63024 kfree(op);
63025 if (wake_cookie)
63026 __fscache_wake_unused_cookie(cookie);
63027- fscache_stat(&fscache_n_stores_nobufs);
63028+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63029 _leave(" = -ENOBUFS");
63030 return -ENOBUFS;
63031
63032 nomem_free:
63033 kfree(op);
63034 nomem:
63035- fscache_stat(&fscache_n_stores_oom);
63036+ fscache_stat_unchecked(&fscache_n_stores_oom);
63037 _leave(" = -ENOMEM");
63038 return -ENOMEM;
63039 }
63040@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63041 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63042 ASSERTCMP(page, !=, NULL);
63043
63044- fscache_stat(&fscache_n_uncaches);
63045+ fscache_stat_unchecked(&fscache_n_uncaches);
63046
63047 /* cache withdrawal may beat us to it */
63048 if (!PageFsCache(page))
63049@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63050 struct fscache_cookie *cookie = op->op.object->cookie;
63051
63052 #ifdef CONFIG_FSCACHE_STATS
63053- atomic_inc(&fscache_n_marks);
63054+ atomic_inc_unchecked(&fscache_n_marks);
63055 #endif
63056
63057 _debug("- mark %p{%lx}", page, page->index);
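The page.c hunks above extend the same counter conversion and also switch the operation debug-id generator to atomic_inc_return_unchecked(), since debug ids only need to be distinct over a short window and may wrap freely. A small C11 sketch (userspace model, not the kernel API) of both patterns, including the nullable stat-pointer style used by fscache_wait_for_operation_activation():

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint op_debug_id;      /* wrap-around is acceptable: ids
                                        need only be distinct briefly */

static unsigned int next_debug_id(void)
{
    return atomic_fetch_add_explicit(&op_debug_id, 1,
                                     memory_order_relaxed) + 1;
}

/* bump a statistics counter only if the caller supplied one */
static void stat_bump(atomic_uint *stat)
{
    if (stat)
        atomic_fetch_add_explicit(stat, 1, memory_order_relaxed);
}

int main(void)
{
    atomic_uint n_op_waits = 0;
    unsigned int a = next_debug_id();
    unsigned int b = next_debug_id();

    printf("id=%u id=%u\n", a, b);
    stat_bump(&n_op_waits);
    stat_bump(NULL);                 /* callers may opt out of stats */
    printf("waits=%u\n", atomic_load(&n_op_waits));
    return 0;
}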
63058diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63059index 40d13c7..ddf52b9 100644
63060--- a/fs/fscache/stats.c
63061+++ b/fs/fscache/stats.c
63062@@ -18,99 +18,99 @@
63063 /*
63064 * operation counters
63065 */
63066-atomic_t fscache_n_op_pend;
63067-atomic_t fscache_n_op_run;
63068-atomic_t fscache_n_op_enqueue;
63069-atomic_t fscache_n_op_requeue;
63070-atomic_t fscache_n_op_deferred_release;
63071-atomic_t fscache_n_op_release;
63072-atomic_t fscache_n_op_gc;
63073-atomic_t fscache_n_op_cancelled;
63074-atomic_t fscache_n_op_rejected;
63075+atomic_unchecked_t fscache_n_op_pend;
63076+atomic_unchecked_t fscache_n_op_run;
63077+atomic_unchecked_t fscache_n_op_enqueue;
63078+atomic_unchecked_t fscache_n_op_requeue;
63079+atomic_unchecked_t fscache_n_op_deferred_release;
63080+atomic_unchecked_t fscache_n_op_release;
63081+atomic_unchecked_t fscache_n_op_gc;
63082+atomic_unchecked_t fscache_n_op_cancelled;
63083+atomic_unchecked_t fscache_n_op_rejected;
63084
63085-atomic_t fscache_n_attr_changed;
63086-atomic_t fscache_n_attr_changed_ok;
63087-atomic_t fscache_n_attr_changed_nobufs;
63088-atomic_t fscache_n_attr_changed_nomem;
63089-atomic_t fscache_n_attr_changed_calls;
63090+atomic_unchecked_t fscache_n_attr_changed;
63091+atomic_unchecked_t fscache_n_attr_changed_ok;
63092+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63093+atomic_unchecked_t fscache_n_attr_changed_nomem;
63094+atomic_unchecked_t fscache_n_attr_changed_calls;
63095
63096-atomic_t fscache_n_allocs;
63097-atomic_t fscache_n_allocs_ok;
63098-atomic_t fscache_n_allocs_wait;
63099-atomic_t fscache_n_allocs_nobufs;
63100-atomic_t fscache_n_allocs_intr;
63101-atomic_t fscache_n_allocs_object_dead;
63102-atomic_t fscache_n_alloc_ops;
63103-atomic_t fscache_n_alloc_op_waits;
63104+atomic_unchecked_t fscache_n_allocs;
63105+atomic_unchecked_t fscache_n_allocs_ok;
63106+atomic_unchecked_t fscache_n_allocs_wait;
63107+atomic_unchecked_t fscache_n_allocs_nobufs;
63108+atomic_unchecked_t fscache_n_allocs_intr;
63109+atomic_unchecked_t fscache_n_allocs_object_dead;
63110+atomic_unchecked_t fscache_n_alloc_ops;
63111+atomic_unchecked_t fscache_n_alloc_op_waits;
63112
63113-atomic_t fscache_n_retrievals;
63114-atomic_t fscache_n_retrievals_ok;
63115-atomic_t fscache_n_retrievals_wait;
63116-atomic_t fscache_n_retrievals_nodata;
63117-atomic_t fscache_n_retrievals_nobufs;
63118-atomic_t fscache_n_retrievals_intr;
63119-atomic_t fscache_n_retrievals_nomem;
63120-atomic_t fscache_n_retrievals_object_dead;
63121-atomic_t fscache_n_retrieval_ops;
63122-atomic_t fscache_n_retrieval_op_waits;
63123+atomic_unchecked_t fscache_n_retrievals;
63124+atomic_unchecked_t fscache_n_retrievals_ok;
63125+atomic_unchecked_t fscache_n_retrievals_wait;
63126+atomic_unchecked_t fscache_n_retrievals_nodata;
63127+atomic_unchecked_t fscache_n_retrievals_nobufs;
63128+atomic_unchecked_t fscache_n_retrievals_intr;
63129+atomic_unchecked_t fscache_n_retrievals_nomem;
63130+atomic_unchecked_t fscache_n_retrievals_object_dead;
63131+atomic_unchecked_t fscache_n_retrieval_ops;
63132+atomic_unchecked_t fscache_n_retrieval_op_waits;
63133
63134-atomic_t fscache_n_stores;
63135-atomic_t fscache_n_stores_ok;
63136-atomic_t fscache_n_stores_again;
63137-atomic_t fscache_n_stores_nobufs;
63138-atomic_t fscache_n_stores_oom;
63139-atomic_t fscache_n_store_ops;
63140-atomic_t fscache_n_store_calls;
63141-atomic_t fscache_n_store_pages;
63142-atomic_t fscache_n_store_radix_deletes;
63143-atomic_t fscache_n_store_pages_over_limit;
63144+atomic_unchecked_t fscache_n_stores;
63145+atomic_unchecked_t fscache_n_stores_ok;
63146+atomic_unchecked_t fscache_n_stores_again;
63147+atomic_unchecked_t fscache_n_stores_nobufs;
63148+atomic_unchecked_t fscache_n_stores_oom;
63149+atomic_unchecked_t fscache_n_store_ops;
63150+atomic_unchecked_t fscache_n_store_calls;
63151+atomic_unchecked_t fscache_n_store_pages;
63152+atomic_unchecked_t fscache_n_store_radix_deletes;
63153+atomic_unchecked_t fscache_n_store_pages_over_limit;
63154
63155-atomic_t fscache_n_store_vmscan_not_storing;
63156-atomic_t fscache_n_store_vmscan_gone;
63157-atomic_t fscache_n_store_vmscan_busy;
63158-atomic_t fscache_n_store_vmscan_cancelled;
63159-atomic_t fscache_n_store_vmscan_wait;
63160+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63161+atomic_unchecked_t fscache_n_store_vmscan_gone;
63162+atomic_unchecked_t fscache_n_store_vmscan_busy;
63163+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63164+atomic_unchecked_t fscache_n_store_vmscan_wait;
63165
63166-atomic_t fscache_n_marks;
63167-atomic_t fscache_n_uncaches;
63168+atomic_unchecked_t fscache_n_marks;
63169+atomic_unchecked_t fscache_n_uncaches;
63170
63171-atomic_t fscache_n_acquires;
63172-atomic_t fscache_n_acquires_null;
63173-atomic_t fscache_n_acquires_no_cache;
63174-atomic_t fscache_n_acquires_ok;
63175-atomic_t fscache_n_acquires_nobufs;
63176-atomic_t fscache_n_acquires_oom;
63177+atomic_unchecked_t fscache_n_acquires;
63178+atomic_unchecked_t fscache_n_acquires_null;
63179+atomic_unchecked_t fscache_n_acquires_no_cache;
63180+atomic_unchecked_t fscache_n_acquires_ok;
63181+atomic_unchecked_t fscache_n_acquires_nobufs;
63182+atomic_unchecked_t fscache_n_acquires_oom;
63183
63184-atomic_t fscache_n_invalidates;
63185-atomic_t fscache_n_invalidates_run;
63186+atomic_unchecked_t fscache_n_invalidates;
63187+atomic_unchecked_t fscache_n_invalidates_run;
63188
63189-atomic_t fscache_n_updates;
63190-atomic_t fscache_n_updates_null;
63191-atomic_t fscache_n_updates_run;
63192+atomic_unchecked_t fscache_n_updates;
63193+atomic_unchecked_t fscache_n_updates_null;
63194+atomic_unchecked_t fscache_n_updates_run;
63195
63196-atomic_t fscache_n_relinquishes;
63197-atomic_t fscache_n_relinquishes_null;
63198-atomic_t fscache_n_relinquishes_waitcrt;
63199-atomic_t fscache_n_relinquishes_retire;
63200+atomic_unchecked_t fscache_n_relinquishes;
63201+atomic_unchecked_t fscache_n_relinquishes_null;
63202+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63203+atomic_unchecked_t fscache_n_relinquishes_retire;
63204
63205-atomic_t fscache_n_cookie_index;
63206-atomic_t fscache_n_cookie_data;
63207-atomic_t fscache_n_cookie_special;
63208+atomic_unchecked_t fscache_n_cookie_index;
63209+atomic_unchecked_t fscache_n_cookie_data;
63210+atomic_unchecked_t fscache_n_cookie_special;
63211
63212-atomic_t fscache_n_object_alloc;
63213-atomic_t fscache_n_object_no_alloc;
63214-atomic_t fscache_n_object_lookups;
63215-atomic_t fscache_n_object_lookups_negative;
63216-atomic_t fscache_n_object_lookups_positive;
63217-atomic_t fscache_n_object_lookups_timed_out;
63218-atomic_t fscache_n_object_created;
63219-atomic_t fscache_n_object_avail;
63220-atomic_t fscache_n_object_dead;
63221+atomic_unchecked_t fscache_n_object_alloc;
63222+atomic_unchecked_t fscache_n_object_no_alloc;
63223+atomic_unchecked_t fscache_n_object_lookups;
63224+atomic_unchecked_t fscache_n_object_lookups_negative;
63225+atomic_unchecked_t fscache_n_object_lookups_positive;
63226+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63227+atomic_unchecked_t fscache_n_object_created;
63228+atomic_unchecked_t fscache_n_object_avail;
63229+atomic_unchecked_t fscache_n_object_dead;
63230
63231-atomic_t fscache_n_checkaux_none;
63232-atomic_t fscache_n_checkaux_okay;
63233-atomic_t fscache_n_checkaux_update;
63234-atomic_t fscache_n_checkaux_obsolete;
63235+atomic_unchecked_t fscache_n_checkaux_none;
63236+atomic_unchecked_t fscache_n_checkaux_okay;
63237+atomic_unchecked_t fscache_n_checkaux_update;
63238+atomic_unchecked_t fscache_n_checkaux_obsolete;
63239
63240 atomic_t fscache_n_cop_alloc_object;
63241 atomic_t fscache_n_cop_lookup_object;
63242@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63243 seq_puts(m, "FS-Cache statistics\n");
63244
63245 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63246- atomic_read(&fscache_n_cookie_index),
63247- atomic_read(&fscache_n_cookie_data),
63248- atomic_read(&fscache_n_cookie_special));
63249+ atomic_read_unchecked(&fscache_n_cookie_index),
63250+ atomic_read_unchecked(&fscache_n_cookie_data),
63251+ atomic_read_unchecked(&fscache_n_cookie_special));
63252
63253 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63254- atomic_read(&fscache_n_object_alloc),
63255- atomic_read(&fscache_n_object_no_alloc),
63256- atomic_read(&fscache_n_object_avail),
63257- atomic_read(&fscache_n_object_dead));
63258+ atomic_read_unchecked(&fscache_n_object_alloc),
63259+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63260+ atomic_read_unchecked(&fscache_n_object_avail),
63261+ atomic_read_unchecked(&fscache_n_object_dead));
63262 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63263- atomic_read(&fscache_n_checkaux_none),
63264- atomic_read(&fscache_n_checkaux_okay),
63265- atomic_read(&fscache_n_checkaux_update),
63266- atomic_read(&fscache_n_checkaux_obsolete));
63267+ atomic_read_unchecked(&fscache_n_checkaux_none),
63268+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63269+ atomic_read_unchecked(&fscache_n_checkaux_update),
63270+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63271
63272 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63273- atomic_read(&fscache_n_marks),
63274- atomic_read(&fscache_n_uncaches));
63275+ atomic_read_unchecked(&fscache_n_marks),
63276+ atomic_read_unchecked(&fscache_n_uncaches));
63277
63278 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63279 " oom=%u\n",
63280- atomic_read(&fscache_n_acquires),
63281- atomic_read(&fscache_n_acquires_null),
63282- atomic_read(&fscache_n_acquires_no_cache),
63283- atomic_read(&fscache_n_acquires_ok),
63284- atomic_read(&fscache_n_acquires_nobufs),
63285- atomic_read(&fscache_n_acquires_oom));
63286+ atomic_read_unchecked(&fscache_n_acquires),
63287+ atomic_read_unchecked(&fscache_n_acquires_null),
63288+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63289+ atomic_read_unchecked(&fscache_n_acquires_ok),
63290+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63291+ atomic_read_unchecked(&fscache_n_acquires_oom));
63292
63293 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63294- atomic_read(&fscache_n_object_lookups),
63295- atomic_read(&fscache_n_object_lookups_negative),
63296- atomic_read(&fscache_n_object_lookups_positive),
63297- atomic_read(&fscache_n_object_created),
63298- atomic_read(&fscache_n_object_lookups_timed_out));
63299+ atomic_read_unchecked(&fscache_n_object_lookups),
63300+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63301+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63302+ atomic_read_unchecked(&fscache_n_object_created),
63303+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63304
63305 seq_printf(m, "Invals : n=%u run=%u\n",
63306- atomic_read(&fscache_n_invalidates),
63307- atomic_read(&fscache_n_invalidates_run));
63308+ atomic_read_unchecked(&fscache_n_invalidates),
63309+ atomic_read_unchecked(&fscache_n_invalidates_run));
63310
63311 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63312- atomic_read(&fscache_n_updates),
63313- atomic_read(&fscache_n_updates_null),
63314- atomic_read(&fscache_n_updates_run));
63315+ atomic_read_unchecked(&fscache_n_updates),
63316+ atomic_read_unchecked(&fscache_n_updates_null),
63317+ atomic_read_unchecked(&fscache_n_updates_run));
63318
63319 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63320- atomic_read(&fscache_n_relinquishes),
63321- atomic_read(&fscache_n_relinquishes_null),
63322- atomic_read(&fscache_n_relinquishes_waitcrt),
63323- atomic_read(&fscache_n_relinquishes_retire));
63324+ atomic_read_unchecked(&fscache_n_relinquishes),
63325+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63326+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63327+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63328
63329 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63330- atomic_read(&fscache_n_attr_changed),
63331- atomic_read(&fscache_n_attr_changed_ok),
63332- atomic_read(&fscache_n_attr_changed_nobufs),
63333- atomic_read(&fscache_n_attr_changed_nomem),
63334- atomic_read(&fscache_n_attr_changed_calls));
63335+ atomic_read_unchecked(&fscache_n_attr_changed),
63336+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63337+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63338+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63339+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63340
63341 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63342- atomic_read(&fscache_n_allocs),
63343- atomic_read(&fscache_n_allocs_ok),
63344- atomic_read(&fscache_n_allocs_wait),
63345- atomic_read(&fscache_n_allocs_nobufs),
63346- atomic_read(&fscache_n_allocs_intr));
63347+ atomic_read_unchecked(&fscache_n_allocs),
63348+ atomic_read_unchecked(&fscache_n_allocs_ok),
63349+ atomic_read_unchecked(&fscache_n_allocs_wait),
63350+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63351+ atomic_read_unchecked(&fscache_n_allocs_intr));
63352 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63353- atomic_read(&fscache_n_alloc_ops),
63354- atomic_read(&fscache_n_alloc_op_waits),
63355- atomic_read(&fscache_n_allocs_object_dead));
63356+ atomic_read_unchecked(&fscache_n_alloc_ops),
63357+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63358+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63359
63360 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63361 " int=%u oom=%u\n",
63362- atomic_read(&fscache_n_retrievals),
63363- atomic_read(&fscache_n_retrievals_ok),
63364- atomic_read(&fscache_n_retrievals_wait),
63365- atomic_read(&fscache_n_retrievals_nodata),
63366- atomic_read(&fscache_n_retrievals_nobufs),
63367- atomic_read(&fscache_n_retrievals_intr),
63368- atomic_read(&fscache_n_retrievals_nomem));
63369+ atomic_read_unchecked(&fscache_n_retrievals),
63370+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63371+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63372+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63373+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63374+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63375+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63376 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63377- atomic_read(&fscache_n_retrieval_ops),
63378- atomic_read(&fscache_n_retrieval_op_waits),
63379- atomic_read(&fscache_n_retrievals_object_dead));
63380+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63381+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63382+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63383
63384 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63385- atomic_read(&fscache_n_stores),
63386- atomic_read(&fscache_n_stores_ok),
63387- atomic_read(&fscache_n_stores_again),
63388- atomic_read(&fscache_n_stores_nobufs),
63389- atomic_read(&fscache_n_stores_oom));
63390+ atomic_read_unchecked(&fscache_n_stores),
63391+ atomic_read_unchecked(&fscache_n_stores_ok),
63392+ atomic_read_unchecked(&fscache_n_stores_again),
63393+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63394+ atomic_read_unchecked(&fscache_n_stores_oom));
63395 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63396- atomic_read(&fscache_n_store_ops),
63397- atomic_read(&fscache_n_store_calls),
63398- atomic_read(&fscache_n_store_pages),
63399- atomic_read(&fscache_n_store_radix_deletes),
63400- atomic_read(&fscache_n_store_pages_over_limit));
63401+ atomic_read_unchecked(&fscache_n_store_ops),
63402+ atomic_read_unchecked(&fscache_n_store_calls),
63403+ atomic_read_unchecked(&fscache_n_store_pages),
63404+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63405+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63406
63407 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63408- atomic_read(&fscache_n_store_vmscan_not_storing),
63409- atomic_read(&fscache_n_store_vmscan_gone),
63410- atomic_read(&fscache_n_store_vmscan_busy),
63411- atomic_read(&fscache_n_store_vmscan_cancelled),
63412- atomic_read(&fscache_n_store_vmscan_wait));
63413+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63414+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63415+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63416+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63417+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63418
63419 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63420- atomic_read(&fscache_n_op_pend),
63421- atomic_read(&fscache_n_op_run),
63422- atomic_read(&fscache_n_op_enqueue),
63423- atomic_read(&fscache_n_op_cancelled),
63424- atomic_read(&fscache_n_op_rejected));
63425+ atomic_read_unchecked(&fscache_n_op_pend),
63426+ atomic_read_unchecked(&fscache_n_op_run),
63427+ atomic_read_unchecked(&fscache_n_op_enqueue),
63428+ atomic_read_unchecked(&fscache_n_op_cancelled),
63429+ atomic_read_unchecked(&fscache_n_op_rejected));
63430 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63431- atomic_read(&fscache_n_op_deferred_release),
63432- atomic_read(&fscache_n_op_release),
63433- atomic_read(&fscache_n_op_gc));
63434+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63435+ atomic_read_unchecked(&fscache_n_op_release),
63436+ atomic_read_unchecked(&fscache_n_op_gc));
63437
63438 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63439 atomic_read(&fscache_n_cop_alloc_object),
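Note what the stats.c hunk above leaves alone: the fscache_n_cop_* counters keep plain atomic_t. Presumably that is because they are incremented and decremented in pairs around cache-operation calls (fscache_stat()/fscache_stat_d() in page.c), so an underflow there indicates a real imbalance worth trapping on, unlike the monotonically growing event counters converted above. A sketch of that checked inc/dec pairing:

#include <assert.h>
#include <stdatomic.h>

typedef atomic_int checked_stat_t;

static void stat_inc(checked_stat_t *s)
{
    atomic_fetch_add_explicit(s, 1, memory_order_relaxed);
}

static void stat_dec(checked_stat_t *s)
{
    int old = atomic_fetch_sub_explicit(s, 1, memory_order_relaxed);
    assert(old > 0 && "counter underflow: unbalanced inc/dec");
}

int main(void)
{
    checked_stat_t in_flight = 0;

    stat_inc(&in_flight);   /* before calling into the cache backend */
    stat_dec(&in_flight);   /* after it returns */
    return 0;
}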
63440diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63441index 28d0c7a..04816b7 100644
63442--- a/fs/fuse/cuse.c
63443+++ b/fs/fuse/cuse.c
63444@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63445 INIT_LIST_HEAD(&cuse_conntbl[i]);
63446
63447 /* inherit and extend fuse_dev_operations */
63448- cuse_channel_fops = fuse_dev_operations;
63449- cuse_channel_fops.owner = THIS_MODULE;
63450- cuse_channel_fops.open = cuse_channel_open;
63451- cuse_channel_fops.release = cuse_channel_release;
63452+ pax_open_kernel();
63453+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63454+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63455+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63456+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63457+ pax_close_kernel();
63458
63459 cuse_class = class_create(THIS_MODULE, "cuse");
63460 if (IS_ERR(cuse_class))
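The cuse_init() hunk above deals with PaX constification: file_operations instances are made read-only at build time, so CUSE can no longer clone and patch fuse_dev_operations with plain assignments. pax_open_kernel()/pax_close_kernel() briefly lift kernel write protection (on x86, by toggling CR0.WP) around the memcpy and pointer writes. A userspace analogy using mprotect(), purely illustrative:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { void (*open)(void); void (*release)(void); };

static void my_open(void)    { puts("open");    }
static void my_release(void) { puts("release"); }

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    /* stand-in for a write-protected kernel data page */
    struct file_ops *fops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (fops == MAP_FAILED)
        return 1;
    mprotect(fops, pg, PROT_READ);                /* "constified" */

    mprotect(fops, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
    fops->open = my_open;
    fops->release = my_release;
    mprotect(fops, pg, PROT_READ);                /* pax_close_kernel() */

    fops->open();
    fops->release();
    return 0;
}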
63461diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63462index 71c4619..6a9f6d4 100644
63463--- a/fs/fuse/dev.c
63464+++ b/fs/fuse/dev.c
63465@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63466 ret = 0;
63467 pipe_lock(pipe);
63468
63469- if (!pipe->readers) {
63470+ if (!atomic_read(&pipe->readers)) {
63471 send_sig(SIGPIPE, current, 0);
63472 if (!ret)
63473 ret = -EPIPE;
63474@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63475 page_nr++;
63476 ret += buf->len;
63477
63478- if (pipe->files)
63479+ if (atomic_read(&pipe->files))
63480 do_wakeup = 1;
63481 }
63482
63483diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63484index 08e7b1a..d91c6ee 100644
63485--- a/fs/fuse/dir.c
63486+++ b/fs/fuse/dir.c
63487@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63488 return link;
63489 }
63490
63491-static void free_link(char *link)
63492+static void free_link(const char *link)
63493 {
63494 if (!IS_ERR(link))
63495 free_page((unsigned long) link);
63496diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63497index fd62cae..3494dfa 100644
63498--- a/fs/hostfs/hostfs_kern.c
63499+++ b/fs/hostfs/hostfs_kern.c
63500@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63501
63502 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63503 {
63504- char *s = nd_get_link(nd);
63505+ const char *s = nd_get_link(nd);
63506 if (!IS_ERR(s))
63507 __putname(s);
63508 }
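The free_link() and hostfs_put_link() changes above, like the kernfs, libfs and namei hunks further down, are const-correctness fixes: a symlink body is produced once and afterwards only read or freed, so the getter can return const char * and the put-side helpers can accept it. A minimal sketch of the resulting API shape:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nd { const char *saved; };

static void nd_set_link(struct nd *nd, const char *path) { nd->saved = path; }
static const char *nd_get_link(const struct nd *nd)      { return nd->saved; }

static void free_link(const char *link)
{
    /* freeing is the one place the const must be cast away */
    free((void *)link);
}

int main(void)
{
    struct nd nd;

    nd_set_link(&nd, strdup("/target/of/link"));
    printf("link = %s\n", nd_get_link(&nd));
    free_link(nd_get_link(&nd));
    return 0;
}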
63509diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63510index 5eba47f..d353c22 100644
63511--- a/fs/hugetlbfs/inode.c
63512+++ b/fs/hugetlbfs/inode.c
63513@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63514 struct mm_struct *mm = current->mm;
63515 struct vm_area_struct *vma;
63516 struct hstate *h = hstate_file(file);
63517+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63518 struct vm_unmapped_area_info info;
63519
63520 if (len & ~huge_page_mask(h))
63521@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63522 return addr;
63523 }
63524
63525+#ifdef CONFIG_PAX_RANDMMAP
63526+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63527+#endif
63528+
63529 if (addr) {
63530 addr = ALIGN(addr, huge_page_size(h));
63531 vma = find_vma(mm, addr);
63532- if (TASK_SIZE - len >= addr &&
63533- (!vma || addr + len <= vma->vm_start))
63534+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63535 return addr;
63536 }
63537
63538 info.flags = 0;
63539 info.length = len;
63540 info.low_limit = TASK_UNMAPPED_BASE;
63541+
63542+#ifdef CONFIG_PAX_RANDMMAP
63543+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63544+ info.low_limit += mm->delta_mmap;
63545+#endif
63546+
63547 info.high_limit = TASK_SIZE;
63548 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63549 info.align_offset = 0;
63550@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63551 };
63552 MODULE_ALIAS_FS("hugetlbfs");
63553
63554-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63555+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63556
63557 static int can_do_hugetlb_shm(void)
63558 {
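The hugetlb_get_unmapped_area() hunk above applies the usual PAX_RANDMMAP treatment: ignore the caller's address hint when randomization is active, shift the search floor by the per-process mm->delta_mmap, and replace the raw "addr + len <= vma->vm_start" test with check_heap_stack_gap(), which also demands a guard gap before the next mapping. A userspace model with hypothetical constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE 0x40000000UL
#define PAGE_SIZE          4096UL

struct vma { uintptr_t start; };

static bool check_gap(const struct vma *next, uintptr_t addr,
                      size_t len, size_t gap)
{
    /* placement is valid only if [addr, addr+len+gap) stays below
     * the next mapping (no VMA means the range is free) */
    return !next || addr + len + gap <= next->start;
}

int main(void)
{
    size_t delta_mmap = ((uintptr_t)rand() & 0xffff) * PAGE_SIZE;
    uintptr_t low_limit = TASK_UNMAPPED_BASE + delta_mmap;
    struct vma next = { low_limit + 64 * PAGE_SIZE };

    printf("search floor: %#lx\n", (unsigned long)low_limit);
    printf("fits: %d\n", check_gap(&next, low_limit,
                                   32 * PAGE_SIZE, PAGE_SIZE));
    return 0;
}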
63559diff --git a/fs/inode.c b/fs/inode.c
63560index aa149e7..46f1f65 100644
63561--- a/fs/inode.c
63562+++ b/fs/inode.c
63563@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63564 unsigned int *p = &get_cpu_var(last_ino);
63565 unsigned int res = *p;
63566
63567+start:
63568+
63569 #ifdef CONFIG_SMP
63570 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63571- static atomic_t shared_last_ino;
63572- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63573+ static atomic_unchecked_t shared_last_ino;
63574+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63575
63576 res = next - LAST_INO_BATCH;
63577 }
63578 #endif
63579
63580- *p = ++res;
63581+ if (unlikely(!++res))
63582+ goto start; /* never zero */
63583+ *p = res;
63584 put_cpu_var(last_ino);
63585 return res;
63586 }
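The get_next_ino() hunk above does two things: it converts the shared batch counter to the unchecked flavour, and it retries on wrap so that inode number 0, which the VFS treats as "no inode", is never handed out. A standalone C11 model of the batching-plus-skip-zero logic:

#include <stdatomic.h>
#include <stdio.h>

#define LAST_INO_BATCH 1024U

static atomic_uint shared_last_ino;

static unsigned int get_next_ino(unsigned int *cached)
{
    unsigned int res = *cached;

    for (;;) {
        if ((res & (LAST_INO_BATCH - 1)) == 0)   /* batch exhausted */
            res = atomic_fetch_add(&shared_last_ino, LAST_INO_BATCH);
        if (++res != 0)                          /* never hand out 0 */
            break;
    }
    *cached = res;
    return res;
}

int main(void)
{
    unsigned int cache = 0;

    printf("%u %u %u\n", get_next_ino(&cache),
           get_next_ino(&cache), get_next_ino(&cache));
    return 0;
}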
63587diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63588index 4a6cf28..d3a29d3 100644
63589--- a/fs/jffs2/erase.c
63590+++ b/fs/jffs2/erase.c
63591@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63592 struct jffs2_unknown_node marker = {
63593 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63594 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63595- .totlen = cpu_to_je32(c->cleanmarker_size)
63596+ .totlen = cpu_to_je32(c->cleanmarker_size),
63597+ .hdr_crc = cpu_to_je32(0)
63598 };
63599
63600 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63601diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63602index 09ed551..45684f8 100644
63603--- a/fs/jffs2/wbuf.c
63604+++ b/fs/jffs2/wbuf.c
63605@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63606 {
63607 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63608 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63609- .totlen = constant_cpu_to_je32(8)
63610+ .totlen = constant_cpu_to_je32(8),
63611+ .hdr_crc = constant_cpu_to_je32(0)
63612 };
63613
63614 /*
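The two jffs2 hunks above add an explicit .hdr_crc = 0 to the cleanmarker initializers. Per C11 6.7.9, members omitted from a designated initializer are zero-initialized anyway, so this is a clarity change rather than a behavioural one; the deliberately-zero CRC is now visible at the definition site. A compile-and-run demonstration:

#include <assert.h>
#include <stdint.h>

struct unknown_node {
    uint16_t magic;
    uint16_t nodetype;
    uint32_t totlen;
    uint32_t hdr_crc;
};

int main(void)
{
    struct unknown_node implicit_crc = { .magic = 0x1985, .totlen = 8 };
    struct unknown_node explicit_crc = { .magic = 0x1985, .totlen = 8,
                                         .hdr_crc = 0 };

    /* omitted members are zero-initialized, so both forms agree */
    assert(implicit_crc.hdr_crc == explicit_crc.hdr_crc);
    return 0;
}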
63615diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63616index 16c3a95..e9cb75d 100644
63617--- a/fs/jfs/super.c
63618+++ b/fs/jfs/super.c
63619@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63620
63621 jfs_inode_cachep =
63622 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63623- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63624+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63625 init_once);
63626 if (jfs_inode_cachep == NULL)
63627 return -ENOMEM;
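The jfs hunk above tags jfs_inode_cachep with SLAB_USERCOPY, grsecurity's whitelist flag for hardened usercopy: copy_to_user()/copy_from_user() against slab objects is only permitted for caches explicitly marked as holding user-visible data. A toy model of the whitelist check:

#include <stdbool.h>
#include <stdio.h>

#define SLAB_USERCOPY 0x1

struct kmem_cache { const char *name; unsigned int flags; };

static bool usercopy_allowed(const struct kmem_cache *c)
{
    return c->flags & SLAB_USERCOPY;
}

int main(void)
{
    struct kmem_cache jfs_ip = { "jfs_ip", SLAB_USERCOPY };
    struct kmem_cache cred   = { "cred",   0 };

    printf("%s: %s\n", jfs_ip.name,
           usercopy_allowed(&jfs_ip) ? "copy ok" : "blocked");
    printf("%s: %s\n", cred.name,
           usercopy_allowed(&cred)   ? "copy ok" : "blocked");
    return 0;
}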
63628diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63629index 2d881b3..fe1ac77 100644
63630--- a/fs/kernfs/dir.c
63631+++ b/fs/kernfs/dir.c
63632@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63633 *
63634 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63635 */
63636-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63637+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63638 {
63639 unsigned long hash = init_name_hash();
63640 unsigned int len = strlen(name);
63641@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63642 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63643
63644 kernfs_put_active(parent);
63645+
63646+ if (!ret) {
63647+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63648+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63649+ }
63650+
63651 return ret;
63652 }
63653
63654diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63655index ddc9f96..4e450ad 100644
63656--- a/fs/kernfs/file.c
63657+++ b/fs/kernfs/file.c
63658@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63659
63660 struct kernfs_open_node {
63661 atomic_t refcnt;
63662- atomic_t event;
63663+ atomic_unchecked_t event;
63664 wait_queue_head_t poll;
63665 struct list_head files; /* goes through kernfs_open_file.list */
63666 };
63667@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63668 {
63669 struct kernfs_open_file *of = sf->private;
63670
63671- of->event = atomic_read(&of->kn->attr.open->event);
63672+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63673
63674 return of->kn->attr.ops->seq_show(sf, v);
63675 }
63676@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63677 {
63678 struct kernfs_open_file *of = kernfs_of(file);
63679 const struct kernfs_ops *ops;
63680- size_t len;
63681+ ssize_t len;
63682 char *buf;
63683
63684 if (of->atomic_write_len) {
63685@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63686 return ret;
63687 }
63688
63689-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63690- void *buf, int len, int write)
63691+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63692+ void *buf, size_t len, int write)
63693 {
63694 struct file *file = vma->vm_file;
63695 struct kernfs_open_file *of = kernfs_of(file);
63696- int ret;
63697+ ssize_t ret;
63698
63699 if (!of->vm_ops)
63700 return -EINVAL;
63701@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63702 return -ENOMEM;
63703
63704 atomic_set(&new_on->refcnt, 0);
63705- atomic_set(&new_on->event, 1);
63706+ atomic_set_unchecked(&new_on->event, 1);
63707 init_waitqueue_head(&new_on->poll);
63708 INIT_LIST_HEAD(&new_on->files);
63709 goto retry;
63710@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63711
63712 kernfs_put_active(kn);
63713
63714- if (of->event != atomic_read(&on->event))
63715+ if (of->event != atomic_read_unchecked(&on->event))
63716 goto trigger;
63717
63718 return DEFAULT_POLLMASK;
63719@@ -823,7 +823,7 @@ repeat:
63720
63721 on = kn->attr.open;
63722 if (on) {
63723- atomic_inc(&on->event);
63724+ atomic_inc_unchecked(&on->event);
63725 wake_up_interruptible(&on->poll);
63726 }
63727
63728diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63729index 8a19889..4c3069a 100644
63730--- a/fs/kernfs/symlink.c
63731+++ b/fs/kernfs/symlink.c
63732@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63733 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63734 void *cookie)
63735 {
63736- char *page = nd_get_link(nd);
63737+ const char *page = nd_get_link(nd);
63738 if (!IS_ERR(page))
63739 free_page((unsigned long)page);
63740 }
63741diff --git a/fs/libfs.c b/fs/libfs.c
63742index 005843c..06c4191 100644
63743--- a/fs/libfs.c
63744+++ b/fs/libfs.c
63745@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63746
63747 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63748 struct dentry *next = list_entry(p, struct dentry, d_child);
63749+ char d_name[sizeof(next->d_iname)];
63750+ const unsigned char *name;
63751+
63752 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63753 if (!simple_positive(next)) {
63754 spin_unlock(&next->d_lock);
63755@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63756
63757 spin_unlock(&next->d_lock);
63758 spin_unlock(&dentry->d_lock);
63759- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63760+ name = next->d_name.name;
63761+ if (name == next->d_iname) {
63762+ memcpy(d_name, name, next->d_name.len);
63763+ name = d_name;
63764+ }
63765+ if (!dir_emit(ctx, name, next->d_name.len,
63766 next->d_inode->i_ino, dt_type(next->d_inode)))
63767 return 0;
63768 spin_lock(&dentry->d_lock);
63769@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63770 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63771 void *cookie)
63772 {
63773- char *s = nd_get_link(nd);
63774+ const char *s = nd_get_link(nd);
63775 if (!IS_ERR(s))
63776 kfree(s);
63777 }
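The dcache_readdir() hunk above snapshots short names before emitting them. Names up to DNAME_INLINE_LEN live inline in the dentry (d_iname), and once the locks are dropped a concurrent rename can rewrite those bytes in place, so the name is copied to a stack buffer first. A userspace model (assuming len stays below the inline limit):

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry {
    const char *name;        /* points at d_iname for short names */
    unsigned int len;        /* assumed < DNAME_INLINE_LEN here */
    char d_iname[DNAME_INLINE_LEN];
};

static void emit_name(const struct dentry *d)
{
    char buf[DNAME_INLINE_LEN];
    const char *name = d->name;

    if (name == d->d_iname) {            /* inline: snapshot it first */
        memcpy(buf, name, d->len);
        name = buf;
    }
    printf("emit: %.*s\n", (int)d->len, name);
}

int main(void)
{
    struct dentry d = { .len = 5 };

    memcpy(d.d_iname, "hello", 6);
    d.name = d.d_iname;
    emit_name(&d);
    return 0;
}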
63778diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63779index acd3947..1f896e2 100644
63780--- a/fs/lockd/clntproc.c
63781+++ b/fs/lockd/clntproc.c
63782@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63783 /*
63784 * Cookie counter for NLM requests
63785 */
63786-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63787+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63788
63789 void nlmclnt_next_cookie(struct nlm_cookie *c)
63790 {
63791- u32 cookie = atomic_inc_return(&nlm_cookie);
63792+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63793
63794 memcpy(c->data, &cookie, 4);
63795 c->len=4;
63796diff --git a/fs/locks.c b/fs/locks.c
63797index 59e2f90..bd69071 100644
63798--- a/fs/locks.c
63799+++ b/fs/locks.c
63800@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63801 locks_remove_posix(filp, filp);
63802
63803 if (filp->f_op->flock) {
63804- struct file_lock fl = {
63805+ struct file_lock flock = {
63806 .fl_owner = filp,
63807 .fl_pid = current->tgid,
63808 .fl_file = filp,
63809@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63810 .fl_type = F_UNLCK,
63811 .fl_end = OFFSET_MAX,
63812 };
63813- filp->f_op->flock(filp, F_SETLKW, &fl);
63814- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63815- fl.fl_ops->fl_release_private(&fl);
63816+ filp->f_op->flock(filp, F_SETLKW, &flock);
63817+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63818+ flock.fl_ops->fl_release_private(&flock);
63819 }
63820
63821 spin_lock(&inode->i_lock);
63822diff --git a/fs/mount.h b/fs/mount.h
63823index 0ad6f76..a04c146 100644
63824--- a/fs/mount.h
63825+++ b/fs/mount.h
63826@@ -12,7 +12,7 @@ struct mnt_namespace {
63827 u64 seq; /* Sequence number to prevent loops */
63828 wait_queue_head_t poll;
63829 u64 event;
63830-};
63831+} __randomize_layout;
63832
63833 struct mnt_pcp {
63834 int mnt_count;
63835@@ -63,7 +63,7 @@ struct mount {
63836 int mnt_expiry_mark; /* true if marked for expiry */
63837 struct hlist_head mnt_pins;
63838 struct path mnt_ex_mountpoint;
63839-};
63840+} __randomize_layout;
63841
63842 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63843
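mount.h gains __randomize_layout annotations, hooking these structures into the grsecurity randstruct GCC plugin: the field order of marked structs is shuffled per build, so an attacker cannot rely on fixed member offsets. A sketch of what the annotation conventionally expands to (the macro body below is illustrative, not copied from this patch):

#if defined(RANDSTRUCT_PLUGIN)
# define __randomize_layout __attribute__((randomize_layout))
#else
# define __randomize_layout    /* no plugin: annotation costs nothing */
#endif

struct mnt_namespace_example {
    unsigned long seq;
    void *root;
    unsigned long event;
} __randomize_layout;          /* field offsets differ between builds */

int main(void) { return 0; }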
63844diff --git a/fs/namei.c b/fs/namei.c
63845index bc35b02..7ed1f1d 100644
63846--- a/fs/namei.c
63847+++ b/fs/namei.c
63848@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63849 if (ret != -EACCES)
63850 return ret;
63851
63852+#ifdef CONFIG_GRKERNSEC
63853+ /* we'll block if we have to log due to a denied capability use */
63854+ if (mask & MAY_NOT_BLOCK)
63855+ return -ECHILD;
63856+#endif
63857+
63858 if (S_ISDIR(inode->i_mode)) {
63859 /* DACs are overridable for directories */
63860- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63861- return 0;
63862 if (!(mask & MAY_WRITE))
63863- if (capable_wrt_inode_uidgid(inode,
63864- CAP_DAC_READ_SEARCH))
63865+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63866+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63867 return 0;
63868+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63869+ return 0;
63870 return -EACCES;
63871 }
63872 /*
63873+ * Searching includes executable on directories, else just read.
63874+ */
63875+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63876+ if (mask == MAY_READ)
63877+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63878+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63879+ return 0;
63880+
63881+ /*
63882 * Read/write DACs are always overridable.
63883 * Executable DACs are overridable when there is
63884 * at least one exec bit set.
63885@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63886 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63887 return 0;
63888
63889- /*
63890- * Searching includes executable on directories, else just read.
63891- */
63892- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63893- if (mask == MAY_READ)
63894- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63895- return 0;
63896-
63897 return -EACCES;
63898 }
63899 EXPORT_SYMBOL(generic_permission);
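The generic_permission() reshuffle above changes which capability is consulted first for directories: the narrower CAP_DAC_READ_SEARCH and a non-logging CAP_DAC_OVERRIDE probe (capable_wrt_inode_uidgid_nolog(), a grsecurity helper) are tried before the fully logged CAP_DAC_OVERRIDE check, so routine read/search access by a capable task does not flood the audit log. A userspace model of the ordering, with stand-in capability checks:

#include <stdbool.h>
#include <stdio.h>

enum { MAY_READ = 4, MAY_WRITE = 2, MAY_EXEC = 1 };

static bool cap_read_search;   /* stand-ins for capability state */
static bool cap_override;

static bool capable_nolog(void) { return cap_override; }
static bool capable_logged(void)
{
    if (cap_override)
        printf("audit: CAP_DAC_OVERRIDE used\n");
    return cap_override;
}

static int dir_permission(int mask)
{
    if (!(mask & MAY_WRITE))
        if (capable_nolog() || cap_read_search)
            return 0;                /* quiet path for read/search */
    if (capable_logged())
        return 0;                    /* write access: logged override */
    return -13; /* -EACCES */
}

int main(void)
{
    cap_override = true;
    printf("read-only lookup -> %d (no audit line)\n",
           dir_permission(MAY_READ | MAY_EXEC));
    printf("write -> %d\n", dir_permission(MAY_WRITE));
    return 0;
}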
63900@@ -497,7 +504,7 @@ struct nameidata {
63901 int last_type;
63902 unsigned depth;
63903 struct file *base;
63904- char *saved_names[MAX_NESTED_LINKS + 1];
63905+ const char *saved_names[MAX_NESTED_LINKS + 1];
63906 };
63907
63908 /*
63909@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63910 nd->flags |= LOOKUP_JUMPED;
63911 }
63912
63913-void nd_set_link(struct nameidata *nd, char *path)
63914+void nd_set_link(struct nameidata *nd, const char *path)
63915 {
63916 nd->saved_names[nd->depth] = path;
63917 }
63918 EXPORT_SYMBOL(nd_set_link);
63919
63920-char *nd_get_link(struct nameidata *nd)
63921+const char *nd_get_link(const struct nameidata *nd)
63922 {
63923 return nd->saved_names[nd->depth];
63924 }
63925@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63926 {
63927 struct dentry *dentry = link->dentry;
63928 int error;
63929- char *s;
63930+ const char *s;
63931
63932 BUG_ON(nd->flags & LOOKUP_RCU);
63933
63934@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63935 if (error)
63936 goto out_put_nd_path;
63937
63938+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
63939+ dentry->d_inode, dentry, nd->path.mnt)) {
63940+ error = -EACCES;
63941+ goto out_put_nd_path;
63942+ }
63943+
63944 nd->last_type = LAST_BIND;
63945 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
63946 error = PTR_ERR(*p);
63947@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
63948 if (res)
63949 break;
63950 res = walk_component(nd, path, LOOKUP_FOLLOW);
63951+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
63952+ res = -EACCES;
63953 put_link(nd, &link, cookie);
63954 } while (res > 0);
63955
63956@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
63957 static inline u64 hash_name(const char *name)
63958 {
63959 unsigned long a, b, adata, bdata, mask, hash, len;
63960- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63961+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63962
63963 hash = a = 0;
63964 len = -sizeof(unsigned long);
63965@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
63966 if (err)
63967 break;
63968 err = lookup_last(nd, &path);
63969+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
63970+ err = -EACCES;
63971 put_link(nd, &link, cookie);
63972 }
63973 }
63974@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
63975 if (!err)
63976 err = complete_walk(nd);
63977
63978+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
63979+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
63980+ path_put(&nd->path);
63981+ err = -ENOENT;
63982+ }
63983+ }
63984+
63985 if (!err && nd->flags & LOOKUP_DIRECTORY) {
63986 if (!d_can_lookup(nd->path.dentry)) {
63987 path_put(&nd->path);
63988@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
63989 retval = path_lookupat(dfd, name->name,
63990 flags | LOOKUP_REVAL, nd);
63991
63992- if (likely(!retval))
63993+ if (likely(!retval)) {
63994 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
63995+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
63996+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
63997+ path_put(&nd->path);
63998+ return -ENOENT;
63999+ }
64000+ }
64001+ }
64002 return retval;
64003 }
64004
64005@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64006 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64007 return -EPERM;
64008
64009+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64010+ return -EPERM;
64011+ if (gr_handle_rawio(inode))
64012+ return -EPERM;
64013+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64014+ return -EACCES;
64015+
64016 return 0;
64017 }
64018
64019@@ -2826,7 +2864,7 @@ looked_up:
64020 * cleared otherwise prior to returning.
64021 */
64022 static int lookup_open(struct nameidata *nd, struct path *path,
64023- struct file *file,
64024+ struct path *link, struct file *file,
64025 const struct open_flags *op,
64026 bool got_write, int *opened)
64027 {
64028@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64029 /* Negative dentry, just create the file */
64030 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64031 umode_t mode = op->mode;
64032+
64033+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64034+ error = -EACCES;
64035+ goto out_dput;
64036+ }
64037+
64038+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64039+ error = -EACCES;
64040+ goto out_dput;
64041+ }
64042+
64043 if (!IS_POSIXACL(dir->d_inode))
64044 mode &= ~current_umask();
64045 /*
64046@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64047 nd->flags & LOOKUP_EXCL);
64048 if (error)
64049 goto out_dput;
64050+ else
64051+ gr_handle_create(dentry, nd->path.mnt);
64052 }
64053 out_no_open:
64054 path->dentry = dentry;
64055@@ -2896,7 +2947,7 @@ out_dput:
64056 /*
64057 * Handle the last step of open()
64058 */
64059-static int do_last(struct nameidata *nd, struct path *path,
64060+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64061 struct file *file, const struct open_flags *op,
64062 int *opened, struct filename *name)
64063 {
64064@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64065 if (error)
64066 return error;
64067
64068+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64069+ error = -ENOENT;
64070+ goto out;
64071+ }
64072+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64073+ error = -EACCES;
64074+ goto out;
64075+ }
64076+
64077 audit_inode(name, dir, LOOKUP_PARENT);
64078 error = -EISDIR;
64079 /* trailing slashes? */
64080@@ -2965,7 +3025,7 @@ retry_lookup:
64081 */
64082 }
64083 mutex_lock(&dir->d_inode->i_mutex);
64084- error = lookup_open(nd, path, file, op, got_write, opened);
64085+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64086 mutex_unlock(&dir->d_inode->i_mutex);
64087
64088 if (error <= 0) {
64089@@ -2989,11 +3049,28 @@ retry_lookup:
64090 goto finish_open_created;
64091 }
64092
64093+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64094+ error = -ENOENT;
64095+ goto exit_dput;
64096+ }
64097+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64098+ error = -EACCES;
64099+ goto exit_dput;
64100+ }
64101+
64102 /*
64103 * create/update audit record if it already exists.
64104 */
64105- if (d_is_positive(path->dentry))
64106+ if (d_is_positive(path->dentry)) {
64107+ /* only check if O_CREAT is specified, all other checks need to go
64108+ into may_open */
64109+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64110+ error = -EACCES;
64111+ goto exit_dput;
64112+ }
64113+
64114 audit_inode(name, path->dentry, 0);
64115+ }
64116
64117 /*
64118 * If atomic_open() acquired write access it is dropped now due to
64119@@ -3034,6 +3111,11 @@ finish_lookup:
64120 }
64121 }
64122 BUG_ON(inode != path->dentry->d_inode);
64123+ /* if we're resolving a symlink to another symlink */
64124+ if (link && gr_handle_symlink_owner(link, inode)) {
64125+ error = -EACCES;
64126+ goto out;
64127+ }
64128 return 1;
64129 }
64130
64131@@ -3053,7 +3135,18 @@ finish_open:
64132 path_put(&save_parent);
64133 return error;
64134 }
64135+
64136+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64137+ error = -ENOENT;
64138+ goto out;
64139+ }
64140+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64141+ error = -EACCES;
64142+ goto out;
64143+ }
64144+
64145 audit_inode(name, nd->path.dentry, 0);
64146+
64147 error = -EISDIR;
64148 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64149 goto out;
64150@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64151 if (unlikely(error))
64152 goto out;
64153
64154- error = do_last(nd, &path, file, op, &opened, pathname);
64155+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64156 while (unlikely(error > 0)) { /* trailing symlink */
64157 struct path link = path;
64158 void *cookie;
64159@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64160 error = follow_link(&link, nd, &cookie);
64161 if (unlikely(error))
64162 break;
64163- error = do_last(nd, &path, file, op, &opened, pathname);
64164+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64165 put_link(nd, &link, cookie);
64166 }
64167 out:
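The do_last()/lookup_open() plumbing above threads the symlink currently being traversed (NULL on the first pass) from path_openat()'s trailing-symlink loop down into the open path, so gr_handle_symlink_owner() can veto each resolution step. A simplified model with a hypothetical stand-in for that policy hook:

#include <stdbool.h>
#include <stdio.h>

struct path { const char *name; int owner_uid; };

/* hypothetical stand-in for gr_handle_symlink_owner() */
static bool symlink_owner_ok(const struct path *link, int target_uid)
{
    return !link || link->owner_uid == target_uid;
}

static int do_last(const struct path *link, int target_uid)
{
    if (!symlink_owner_ok(link, target_uid))
        return -13;                        /* -EACCES */
    return 0;
}

int main(void)
{
    struct path link = { "/tmp/evil", 1000 };

    printf("direct open -> %d\n", do_last(NULL, 0));
    printf("via symlink -> %d\n", do_last(&link, 0));  /* owner mismatch */
    return 0;
}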
64168@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64169 goto unlock;
64170
64171 error = -EEXIST;
64172- if (d_is_positive(dentry))
64173+ if (d_is_positive(dentry)) {
64174+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64175+ error = -ENOENT;
64176 goto fail;
64177-
64178+ }
64179 /*
64180 * Special case - lookup gave negative, but... we had foo/bar/
64181 * From the vfs_mknod() POV we just have a negative dentry -
64182@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64183 }
64184 EXPORT_SYMBOL(user_path_create);
64185
64186+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64187+{
64188+ struct filename *tmp = getname(pathname);
64189+ struct dentry *res;
64190+ if (IS_ERR(tmp))
64191+ return ERR_CAST(tmp);
64192+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64193+ if (IS_ERR(res))
64194+ putname(tmp);
64195+ else
64196+ *to = tmp;
64197+ return res;
64198+}
64199+
64200 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64201 {
64202 int error = may_create(dir, dentry);
64203@@ -3446,6 +3555,17 @@ retry:
64204
64205 if (!IS_POSIXACL(path.dentry->d_inode))
64206 mode &= ~current_umask();
64207+
64208+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64209+ error = -EPERM;
64210+ goto out;
64211+ }
64212+
64213+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64214+ error = -EACCES;
64215+ goto out;
64216+ }
64217+
64218 error = security_path_mknod(&path, dentry, mode, dev);
64219 if (error)
64220 goto out;
64221@@ -3461,6 +3581,8 @@ retry:
64222 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64223 break;
64224 }
64225+ if (!error)
64226+ gr_handle_create(dentry, path.mnt);
64227 out:
64228 done_path_create(&path, dentry);
64229 if (retry_estale(error, lookup_flags)) {
64230@@ -3515,9 +3637,16 @@ retry:
64231
64232 if (!IS_POSIXACL(path.dentry->d_inode))
64233 mode &= ~current_umask();
64234+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64235+ error = -EACCES;
64236+ goto out;
64237+ }
64238 error = security_path_mkdir(&path, dentry, mode);
64239 if (!error)
64240 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64241+ if (!error)
64242+ gr_handle_create(dentry, path.mnt);
64243+out:
64244 done_path_create(&path, dentry);
64245 if (retry_estale(error, lookup_flags)) {
64246 lookup_flags |= LOOKUP_REVAL;
64247@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64248 struct filename *name;
64249 struct dentry *dentry;
64250 struct nameidata nd;
64251+ u64 saved_ino = 0;
64252+ dev_t saved_dev = 0;
64253 unsigned int lookup_flags = 0;
64254 retry:
64255 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64256@@ -3633,10 +3764,21 @@ retry:
64257 error = -ENOENT;
64258 goto exit3;
64259 }
64260+
64261+ saved_ino = gr_get_ino_from_dentry(dentry);
64262+ saved_dev = gr_get_dev_from_dentry(dentry);
64263+
64264+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64265+ error = -EACCES;
64266+ goto exit3;
64267+ }
64268+
64269 error = security_path_rmdir(&nd.path, dentry);
64270 if (error)
64271 goto exit3;
64272 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64273+ if (!error && (saved_dev || saved_ino))
64274+ gr_handle_delete(saved_ino, saved_dev);
64275 exit3:
64276 dput(dentry);
64277 exit2:
64278@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64279 struct nameidata nd;
64280 struct inode *inode = NULL;
64281 struct inode *delegated_inode = NULL;
64282+ u64 saved_ino = 0;
64283+ dev_t saved_dev = 0;
64284 unsigned int lookup_flags = 0;
64285 retry:
64286 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64287@@ -3755,10 +3899,22 @@ retry_deleg:
64288 if (d_is_negative(dentry))
64289 goto slashes;
64290 ihold(inode);
64291+
64292+ if (inode->i_nlink <= 1) {
64293+ saved_ino = gr_get_ino_from_dentry(dentry);
64294+ saved_dev = gr_get_dev_from_dentry(dentry);
64295+ }
64296+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64297+ error = -EACCES;
64298+ goto exit2;
64299+ }
64300+
64301 error = security_path_unlink(&nd.path, dentry);
64302 if (error)
64303 goto exit2;
64304 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64305+ if (!error && (saved_ino || saved_dev))
64306+ gr_handle_delete(saved_ino, saved_dev);
64307 exit2:
64308 dput(dentry);
64309 }
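
Both the rmdir and unlink hunks above capture the inode and device numbers *before* calling into the VFS, because after a successful deletion the dentry no longer identifies anything; unlink additionally saves them only when i_nlink <= 1, i.e. when removing the last link actually destroys the inode. A minimal userspace sketch of the same ordering constraint for audit logging (not the grsecurity logger itself):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Identity must be read before unlink(); afterwards the name is gone. */
int unlink_logged(const char *path)
{
    struct stat st;
    int have_id = (lstat(path, &st) == 0);

    if (unlink(path) != 0)
        return -1;
    if (have_id && st.st_nlink <= 1)    /* last link: inode is truly gone */
        fprintf(stderr, "audit: removed %s ino=%llu dev=%llu\n", path,
                (unsigned long long)st.st_ino,
                (unsigned long long)st.st_dev);
    return 0;
}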
64310@@ -3847,9 +4003,17 @@ retry:
64311 if (IS_ERR(dentry))
64312 goto out_putname;
64313
64314+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64315+ error = -EACCES;
64316+ goto out;
64317+ }
64318+
64319 error = security_path_symlink(&path, dentry, from->name);
64320 if (!error)
64321 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64322+ if (!error)
64323+ gr_handle_create(dentry, path.mnt);
64324+out:
64325 done_path_create(&path, dentry);
64326 if (retry_estale(error, lookup_flags)) {
64327 lookup_flags |= LOOKUP_REVAL;
64328@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64329 struct dentry *new_dentry;
64330 struct path old_path, new_path;
64331 struct inode *delegated_inode = NULL;
64332+ struct filename *to = NULL;
64333 int how = 0;
64334 int error;
64335
64336@@ -3976,7 +4141,7 @@ retry:
64337 if (error)
64338 return error;
64339
64340- new_dentry = user_path_create(newdfd, newname, &new_path,
64341+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64342 (how & LOOKUP_REVAL));
64343 error = PTR_ERR(new_dentry);
64344 if (IS_ERR(new_dentry))
64345@@ -3988,11 +4153,28 @@ retry:
64346 error = may_linkat(&old_path);
64347 if (unlikely(error))
64348 goto out_dput;
64349+
64350+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64351+ old_path.dentry->d_inode,
64352+ old_path.dentry->d_inode->i_mode, to)) {
64353+ error = -EACCES;
64354+ goto out_dput;
64355+ }
64356+
64357+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64358+ old_path.dentry, old_path.mnt, to)) {
64359+ error = -EACCES;
64360+ goto out_dput;
64361+ }
64362+
64363 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64364 if (error)
64365 goto out_dput;
64366 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64367+ if (!error)
64368+ gr_handle_create(new_dentry, new_path.mnt);
64369 out_dput:
64370+ putname(to);
64371 done_path_create(&new_path, new_dentry);
64372 if (delegated_inode) {
64373 error = break_deleg_wait(&delegated_inode);
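
The linkat hunks route the user-supplied name through user_path_create_with_name() so gr_handle_hardlink() can log it, and reject hardlinks to files the caller does not control. A sketch of that kind of ownership policy (close in spirit to fs.protected_hardlinks; the helper name is made up and access() checking the real uid is a simplification of the kernel's permission test):

#include <sys/stat.h>
#include <unistd.h>

/* Allow a hardlink only to a file we own, or could write to anyway. */
static int may_hardlink(const char *target)
{
    struct stat st;

    if (stat(target, &st) != 0)
        return 0;
    if (st.st_uid == geteuid())
        return 1;
    return access(target, W_OK) == 0;
}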
64374@@ -4308,6 +4490,20 @@ retry_deleg:
64375 if (new_dentry == trap)
64376 goto exit5;
64377
64378+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64379+ /* use EXDEV error to cause 'mv' to switch to an alternative
64380+ * method for usability
64381+ */
64382+ error = -EXDEV;
64383+ goto exit5;
64384+ }
64385+
64386+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64387+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64388+ to, flags);
64389+ if (error)
64390+ goto exit5;
64391+
64392 error = security_path_rename(&oldnd.path, old_dentry,
64393 &newnd.path, new_dentry, flags);
64394 if (error)
64395@@ -4315,6 +4511,9 @@ retry_deleg:
64396 error = vfs_rename(old_dir->d_inode, old_dentry,
64397 new_dir->d_inode, new_dentry,
64398 &delegated_inode, flags);
64399+ if (!error)
64400+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64401+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64402 exit5:
64403 dput(new_dentry);
64404 exit4:
64405@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64406
64407 int readlink_copy(char __user *buffer, int buflen, const char *link)
64408 {
64409+ char tmpbuf[64];
64410+ const char *newlink;
64411 int len = PTR_ERR(link);
64412+
64413 if (IS_ERR(link))
64414 goto out;
64415
64416 len = strlen(link);
64417 if (len > (unsigned) buflen)
64418 len = buflen;
64419- if (copy_to_user(buffer, link, len))
64420+
64421+ if (len < sizeof(tmpbuf)) {
64422+ memcpy(tmpbuf, link, len);
64423+ newlink = tmpbuf;
64424+ } else
64425+ newlink = link;
64426+
64427+ if (copy_to_user(buffer, newlink, len))
64428 len = -EFAULT;
64429 out:
64430 return len;
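
readlink_copy() above stages short link bodies in a 64-byte stack buffer before the copy-out. The point of the bounce buffer is that the source of the user copy becomes a small object with a statically known bound (the kind of source PAX_USERCOPY-style checking can validate), rather than a pointer into arbitrary inode-owned memory. The same pattern in plain C, with memcpy() standing in for copy_to_user():

#include <string.h>

size_t copy_out(char *dst, const char *src, size_t len)
{
    char tmpbuf[64];
    const char *from = src;

    if (len < sizeof(tmpbuf)) {
        memcpy(tmpbuf, src, len);   /* stage small data on the stack */
        from = tmpbuf;
    }
    memcpy(dst, from, len);         /* stands in for copy_to_user() */
    return len;
}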
64431diff --git a/fs/namespace.c b/fs/namespace.c
64432index cd1e968..e64ff16 100644
64433--- a/fs/namespace.c
64434+++ b/fs/namespace.c
64435@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64436 if (!(sb->s_flags & MS_RDONLY))
64437 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64438 up_write(&sb->s_umount);
64439+
64440+ gr_log_remount(mnt->mnt_devname, retval);
64441+
64442 return retval;
64443 }
64444
64445@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64446 }
64447 unlock_mount_hash();
64448 namespace_unlock();
64449+
64450+ gr_log_unmount(mnt->mnt_devname, retval);
64451+
64452 return retval;
64453 }
64454
64455@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64456 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64457 */
64458
64459-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64460+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64461 {
64462 struct path path;
64463 struct mount *mnt;
64464@@ -1565,7 +1571,7 @@ out:
64465 /*
64466 * The 2.0 compatible umount. No flags.
64467 */
64468-SYSCALL_DEFINE1(oldumount, char __user *, name)
64469+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64470 {
64471 return sys_umount(name, 0);
64472 }
64473@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64474 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64475 MS_STRICTATIME);
64476
64477+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64478+ retval = -EPERM;
64479+ goto dput_out;
64480+ }
64481+
64482+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64483+ retval = -EPERM;
64484+ goto dput_out;
64485+ }
64486+
64487 if (flags & MS_REMOUNT)
64488 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64489 data_page);
64490@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64491 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64492 dev_name, data_page);
64493 dput_out:
64494+ gr_log_mount(dev_name, &path, retval);
64495+
64496 path_put(&path);
64497+
64498 return retval;
64499 }
64500
64501@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64502 * number incrementing at 10Ghz will take 12,427 years to wrap which
64503 * is effectively never, so we can ignore the possibility.
64504 */
64505-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64506+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64507
64508 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64509 {
64510@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64511 return ERR_PTR(ret);
64512 }
64513 new_ns->ns.ops = &mntns_operations;
64514- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64515+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64516 atomic_set(&new_ns->count, 1);
64517 new_ns->root = NULL;
64518 INIT_LIST_HEAD(&new_ns->list);
64519@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64520 return new_ns;
64521 }
64522
64523-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64524+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64525 struct user_namespace *user_ns, struct fs_struct *new_fs)
64526 {
64527 struct mnt_namespace *new_ns;
64528@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64529 }
64530 EXPORT_SYMBOL(mount_subtree);
64531
64532-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64533- char __user *, type, unsigned long, flags, void __user *, data)
64534+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64535+ const char __user *, type, unsigned long, flags, void __user *, data)
64536 {
64537 int ret;
64538 char *kernel_type;
64539@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64540 if (error)
64541 goto out2;
64542
64543+ if (gr_handle_chroot_pivot()) {
64544+ error = -EPERM;
64545+ goto out2;
64546+ }
64547+
64548 get_fs_root(current->fs, &root);
64549 old_mp = lock_mount(&old);
64550 error = PTR_ERR(old_mp);
64551@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64552 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64553 return -EPERM;
64554
64555- if (fs->users != 1)
64556+ if (atomic_read(&fs->users) != 1)
64557 return -EINVAL;
64558
64559 get_mnt_ns(mnt_ns);
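
The *_unchecked conversions in this file (mnt_ns_seq, and the fs_struct users count read) follow a pattern seen throughout the patch: counters whose wraparound is harmless, such as sequence numbers and statistics, are marked atomic64_unchecked_t so the PaX REFCOUNT overflow traps apply only to true reference counts, where a wrap is a use-after-free primitive. A userspace model of the distinction in C11 atomics (names illustrative, not kernel API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t mnt_ns_seq = 1;   /* wrap-tolerant sequence */

static uint64_t next_ns_seq(void)
{
    /* fetch_add returns the old value; +1 mirrors atomic64_add_return(),
     * which yields the new value. Wrapping here would be benign. */
    return atomic_fetch_add(&mnt_ns_seq, 1) + 1;
}

int main(void)
{
    printf("seq=%llu\n", (unsigned long long)next_ns_seq());
    return 0;
}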
64560diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64561index 02f8d09..a5c25d1 100644
64562--- a/fs/nfs/callback_xdr.c
64563+++ b/fs/nfs/callback_xdr.c
64564@@ -51,7 +51,7 @@ struct callback_op {
64565 callback_decode_arg_t decode_args;
64566 callback_encode_res_t encode_res;
64567 long res_maxsize;
64568-};
64569+} __do_const;
64570
64571 static struct callback_op callback_ops[];
64572
64573diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64574index 2211f6b..30d0950 100644
64575--- a/fs/nfs/inode.c
64576+++ b/fs/nfs/inode.c
64577@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64578 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64579 }
64580
64581-static atomic_long_t nfs_attr_generation_counter;
64582+static atomic_long_unchecked_t nfs_attr_generation_counter;
64583
64584 static unsigned long nfs_read_attr_generation_counter(void)
64585 {
64586- return atomic_long_read(&nfs_attr_generation_counter);
64587+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64588 }
64589
64590 unsigned long nfs_inc_attr_generation_counter(void)
64591 {
64592- return atomic_long_inc_return(&nfs_attr_generation_counter);
64593+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64594 }
64595
64596 void nfs_fattr_init(struct nfs_fattr *fattr)
64597diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64598index ac71d13..a2e590a 100644
64599--- a/fs/nfsd/nfs4proc.c
64600+++ b/fs/nfsd/nfs4proc.c
64601@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64602 nfsd4op_rsize op_rsize_bop;
64603 stateid_getter op_get_currentstateid;
64604 stateid_setter op_set_currentstateid;
64605-};
64606+} __do_const;
64607
64608 static struct nfsd4_operation nfsd4_ops[];
64609
64610diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64611index 15f7b73..00e230b 100644
64612--- a/fs/nfsd/nfs4xdr.c
64613+++ b/fs/nfsd/nfs4xdr.c
64614@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64615
64616 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64617
64618-static nfsd4_dec nfsd4_dec_ops[] = {
64619+static const nfsd4_dec nfsd4_dec_ops[] = {
64620 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64621 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64622 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64623diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64624index 83a9694..6b7f928 100644
64625--- a/fs/nfsd/nfscache.c
64626+++ b/fs/nfsd/nfscache.c
64627@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64628 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64629 u32 hash;
64630 struct nfsd_drc_bucket *b;
64631- int len;
64632+ long len;
64633 size_t bufsize = 0;
64634
64635 if (!rp)
64636@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64637 hash = nfsd_cache_hash(rp->c_xid);
64638 b = &drc_hashtbl[hash];
64639
64640- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64641- len >>= 2;
64642+ if (statp) {
64643+ len = (char*)statp - (char*)resv->iov_base;
64644+ len = resv->iov_len - len;
64645+ len >>= 2;
64646+ }
64647
64648 /* Don't cache excessive amounts of data and XDR failures */
64649- if (!statp || len > (256 >> 2)) {
64650+ if (!statp || len > (256 >> 2) || len < 0) {
64651 nfsd_reply_cache_free(b, rp);
64652 return;
64653 }
64654@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64655 switch (cachetype) {
64656 case RC_REPLSTAT:
64657 if (len != 1)
64658- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64659+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64660 rp->c_replstat = *statp;
64661 break;
64662 case RC_REPLBUFF:
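
The nfscache hunk fixes two related problems: the length was computed from statp before the NULL test, and it was held in an int with no negative check, so a statp pointing past the reply buffer produced a negative length that slipped under the "len > (256 >> 2)" cap. A synthetic demo of the failure mode the widened type and the "len < 0" test now catch (not nfsd code):

#include <stdio.h>

int main(void)
{
    char buf[512];
    char *iov_base = buf;
    long iov_len = 512;
    char *statp = buf + 600;                  /* bogus: past the buffer */

    long len = iov_len - (statp - iov_base);  /* -88 */
    len >>= 2;                                /* -22 */

    if (len > (256 >> 2) || len < 0)          /* the added check fires */
        printf("rejected: len=%ld\n", len);
    return 0;
}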
64663diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64664index 5685c67..73029ef 100644
64665--- a/fs/nfsd/vfs.c
64666+++ b/fs/nfsd/vfs.c
64667@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64668
64669 oldfs = get_fs();
64670 set_fs(KERNEL_DS);
64671- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64672+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64673 set_fs(oldfs);
64674 return nfsd_finish_read(file, count, host_err);
64675 }
64676@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64677
64678 /* Write the data. */
64679 oldfs = get_fs(); set_fs(KERNEL_DS);
64680- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64681+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64682 set_fs(oldfs);
64683 if (host_err < 0)
64684 goto out_nfserr;
64685@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64686 */
64687
64688 oldfs = get_fs(); set_fs(KERNEL_DS);
64689- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64690+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64691 set_fs(oldfs);
64692
64693 if (host_err < 0)
64694diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64695index 52ccd34..7a6b202 100644
64696--- a/fs/nls/nls_base.c
64697+++ b/fs/nls/nls_base.c
64698@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64699
64700 int __register_nls(struct nls_table *nls, struct module *owner)
64701 {
64702- struct nls_table ** tmp = &tables;
64703+ struct nls_table *tmp = tables;
64704
64705 if (nls->next)
64706 return -EBUSY;
64707
64708- nls->owner = owner;
64709+ pax_open_kernel();
64710+ *(void **)&nls->owner = owner;
64711+ pax_close_kernel();
64712 spin_lock(&nls_lock);
64713- while (*tmp) {
64714- if (nls == *tmp) {
64715+ while (tmp) {
64716+ if (nls == tmp) {
64717 spin_unlock(&nls_lock);
64718 return -EBUSY;
64719 }
64720- tmp = &(*tmp)->next;
64721+ tmp = tmp->next;
64722 }
64723- nls->next = tables;
64724+ pax_open_kernel();
64725+ *(struct nls_table **)&nls->next = tables;
64726+ pax_close_kernel();
64727 tables = nls;
64728 spin_unlock(&nls_lock);
64729 return 0;
64730@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64731
64732 int unregister_nls(struct nls_table * nls)
64733 {
64734- struct nls_table ** tmp = &tables;
64735+ struct nls_table * const * tmp = &tables;
64736
64737 spin_lock(&nls_lock);
64738 while (*tmp) {
64739 if (nls == *tmp) {
64740- *tmp = nls->next;
64741+ pax_open_kernel();
64742+ *(struct nls_table **)tmp = nls->next;
64743+ pax_close_kernel();
64744 spin_unlock(&nls_lock);
64745 return 0;
64746 }
64747@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64748 return -EINVAL;
64749 }
64750
64751-static struct nls_table *find_nls(char *charset)
64752+static struct nls_table *find_nls(const char *charset)
64753 {
64754 struct nls_table *nls;
64755 spin_lock(&nls_lock);
64756@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64757 return nls;
64758 }
64759
64760-struct nls_table *load_nls(char *charset)
64761+struct nls_table *load_nls(const char *charset)
64762 {
64763 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64764 }
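
The pax_open_kernel()/pax_close_kernel() pairs in the NLS code are the standard idiom this patch uses when structures have been constified: the data stays read-only, and every legitimate writer must open a short, explicit write window around the store. A userspace model of the mechanism using mprotect (assumes 4 KiB pages; the real kernel primitive works differently but enforces the same discipline):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *page;

static void open_kernel(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
    page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED)
        return 1;
    strcpy(page, "initial");
    close_kernel();            /* from here, stray stores would fault */

    open_kernel();             /* deliberate, visible write window */
    strcpy(page, "updated");
    close_kernel();

    printf("%s\n", page);
    return 0;
}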
64765diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64766index 162b3f1..6076a7c 100644
64767--- a/fs/nls/nls_euc-jp.c
64768+++ b/fs/nls/nls_euc-jp.c
64769@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64770 p_nls = load_nls("cp932");
64771
64772 if (p_nls) {
64773- table.charset2upper = p_nls->charset2upper;
64774- table.charset2lower = p_nls->charset2lower;
64775+ pax_open_kernel();
64776+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64777+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64778+ pax_close_kernel();
64779 return register_nls(&table);
64780 }
64781
64782diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64783index a80a741..7b96e1b 100644
64784--- a/fs/nls/nls_koi8-ru.c
64785+++ b/fs/nls/nls_koi8-ru.c
64786@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64787 p_nls = load_nls("koi8-u");
64788
64789 if (p_nls) {
64790- table.charset2upper = p_nls->charset2upper;
64791- table.charset2lower = p_nls->charset2lower;
64792+ pax_open_kernel();
64793+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64794+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64795+ pax_close_kernel();
64796 return register_nls(&table);
64797 }
64798
64799diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64800index bff8567..83281c6 100644
64801--- a/fs/notify/fanotify/fanotify_user.c
64802+++ b/fs/notify/fanotify/fanotify_user.c
64803@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64804
64805 fd = fanotify_event_metadata.fd;
64806 ret = -EFAULT;
64807- if (copy_to_user(buf, &fanotify_event_metadata,
64808- fanotify_event_metadata.event_len))
64809+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64810+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64811 goto out_close_fd;
64812
64813 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
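
The fanotify hunk bounds event_len against the size of the stack object being copied out: if the self-described length ever exceeded sizeof(fanotify_event_metadata), copy_to_user() would disclose adjacent kernel stack. The rule, as a standalone sketch with memcpy() standing in for copy_to_user():

#include <string.h>

struct metadata {
    unsigned int event_len;    /* length claimed by the producer */
    char payload[24];
};

/* Never copy out more than the object you are copying from. */
static long copy_event(void *ubuf, const struct metadata *src)
{
    if (src->event_len > sizeof(*src))
        return -1;                        /* -EFAULT in the real code */
    memcpy(ubuf, src, src->event_len);
    return src->event_len;
}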
64814diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64815index a95d8e0..a91a5fd 100644
64816--- a/fs/notify/notification.c
64817+++ b/fs/notify/notification.c
64818@@ -48,7 +48,7 @@
64819 #include <linux/fsnotify_backend.h>
64820 #include "fsnotify.h"
64821
64822-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64823+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64824
64825 /**
64826 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64827@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64828 */
64829 u32 fsnotify_get_cookie(void)
64830 {
64831- return atomic_inc_return(&fsnotify_sync_cookie);
64832+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64833 }
64834 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64835
64836diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64837index 9e38daf..5727cae 100644
64838--- a/fs/ntfs/dir.c
64839+++ b/fs/ntfs/dir.c
64840@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64841 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64842 ~(s64)(ndir->itype.index.block_size - 1)));
64843 /* Bounds checks. */
64844- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64845+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64846 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64847 "inode 0x%lx or driver bug.", vdir->i_ino);
64848 goto err_out;
64849diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64850index 643faa4..ef9027e 100644
64851--- a/fs/ntfs/file.c
64852+++ b/fs/ntfs/file.c
64853@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64854 char *addr;
64855 size_t total = 0;
64856 unsigned len;
64857- int left;
64858+ unsigned left;
64859
64860 do {
64861 len = PAGE_CACHE_SIZE - ofs;
64862diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64863index 9e1e112..241a52a 100644
64864--- a/fs/ntfs/super.c
64865+++ b/fs/ntfs/super.c
64866@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64867 if (!silent)
64868 ntfs_error(sb, "Primary boot sector is invalid.");
64869 } else if (!silent)
64870- ntfs_error(sb, read_err_str, "primary");
64871+ ntfs_error(sb, read_err_str, "%s", "primary");
64872 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64873 if (bh_primary)
64874 brelse(bh_primary);
64875@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64876 goto hotfix_primary_boot_sector;
64877 brelse(bh_backup);
64878 } else if (!silent)
64879- ntfs_error(sb, read_err_str, "backup");
64880+ ntfs_error(sb, read_err_str, "%s", "backup");
64881 /* Try to read NT3.51- backup boot sector. */
64882 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64883 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64884@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64885 "sector.");
64886 brelse(bh_backup);
64887 } else if (!silent)
64888- ntfs_error(sb, read_err_str, "backup");
64889+ ntfs_error(sb, read_err_str, "%s", "backup");
64890 /* We failed. Cleanup and return. */
64891 if (bh_primary)
64892 brelse(bh_primary);
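
The ntfs_error() rewrites above appear to be format-string hardening: the string argument is routed through an explicit "%s" rather than being consumed directly by the format, which also makes the call checkable by compile-time format verification. The general idiom, as a userspace reminder of why data must never be used as a format (generic example, not this exact call site):

#include <stdio.h>

int main(void)
{
    const char *untrusted = "100%s complete";   /* attacker-ish content */

    /* printf(untrusted);        BAD: %s would consume a missing argument */
    printf("%s\n", untrusted);  /* GOOD: the content is inert data */
    return 0;
}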
64893diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64894index 0440134..d52c93a 100644
64895--- a/fs/ocfs2/localalloc.c
64896+++ b/fs/ocfs2/localalloc.c
64897@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64898 goto bail;
64899 }
64900
64901- atomic_inc(&osb->alloc_stats.moves);
64902+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64903
64904 bail:
64905 if (handle)
64906diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64907index 7d6b7d0..5fb529a 100644
64908--- a/fs/ocfs2/ocfs2.h
64909+++ b/fs/ocfs2/ocfs2.h
64910@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64911
64912 struct ocfs2_alloc_stats
64913 {
64914- atomic_t moves;
64915- atomic_t local_data;
64916- atomic_t bitmap_data;
64917- atomic_t bg_allocs;
64918- atomic_t bg_extends;
64919+ atomic_unchecked_t moves;
64920+ atomic_unchecked_t local_data;
64921+ atomic_unchecked_t bitmap_data;
64922+ atomic_unchecked_t bg_allocs;
64923+ atomic_unchecked_t bg_extends;
64924 };
64925
64926 enum ocfs2_local_alloc_state
64927diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
64928index 0cb889a..6a26b24 100644
64929--- a/fs/ocfs2/suballoc.c
64930+++ b/fs/ocfs2/suballoc.c
64931@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
64932 mlog_errno(status);
64933 goto bail;
64934 }
64935- atomic_inc(&osb->alloc_stats.bg_extends);
64936+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
64937
64938 /* You should never ask for this much metadata */
64939 BUG_ON(bits_wanted >
64940@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
64941 mlog_errno(status);
64942 goto bail;
64943 }
64944- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64945+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64946
64947 *suballoc_loc = res.sr_bg_blkno;
64948 *suballoc_bit_start = res.sr_bit_offset;
64949@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
64950 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
64951 res->sr_bits);
64952
64953- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64954+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64955
64956 BUG_ON(res->sr_bits != 1);
64957
64958@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
64959 mlog_errno(status);
64960 goto bail;
64961 }
64962- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64963+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64964
64965 BUG_ON(res.sr_bits != 1);
64966
64967@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64968 cluster_start,
64969 num_clusters);
64970 if (!status)
64971- atomic_inc(&osb->alloc_stats.local_data);
64972+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
64973 } else {
64974 if (min_clusters > (osb->bitmap_cpg - 1)) {
64975 /* The only paths asking for contiguousness
64976@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64977 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
64978 res.sr_bg_blkno,
64979 res.sr_bit_offset);
64980- atomic_inc(&osb->alloc_stats.bitmap_data);
64981+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
64982 *num_clusters = res.sr_bits;
64983 }
64984 }
64985diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
64986index 8372317..ec86e79 100644
64987--- a/fs/ocfs2/super.c
64988+++ b/fs/ocfs2/super.c
64989@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
64990 "%10s => GlobalAllocs: %d LocalAllocs: %d "
64991 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
64992 "Stats",
64993- atomic_read(&osb->alloc_stats.bitmap_data),
64994- atomic_read(&osb->alloc_stats.local_data),
64995- atomic_read(&osb->alloc_stats.bg_allocs),
64996- atomic_read(&osb->alloc_stats.moves),
64997- atomic_read(&osb->alloc_stats.bg_extends));
64998+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
64999+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65000+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65001+ atomic_read_unchecked(&osb->alloc_stats.moves),
65002+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65003
65004 out += snprintf(buf + out, len - out,
65005 "%10s => State: %u Descriptor: %llu Size: %u bits "
65006@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65007
65008 mutex_init(&osb->system_file_mutex);
65009
65010- atomic_set(&osb->alloc_stats.moves, 0);
65011- atomic_set(&osb->alloc_stats.local_data, 0);
65012- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65013- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65014- atomic_set(&osb->alloc_stats.bg_extends, 0);
65015+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65016+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65017+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65018+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65019+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65020
65021 /* Copy the blockcheck stats from the superblock probe */
65022 osb->osb_ecc_stats = *stats;
65023diff --git a/fs/open.c b/fs/open.c
65024index 813be03..781941d 100644
65025--- a/fs/open.c
65026+++ b/fs/open.c
65027@@ -32,6 +32,8 @@
65028 #include <linux/dnotify.h>
65029 #include <linux/compat.h>
65030
65031+#define CREATE_TRACE_POINTS
65032+#include <trace/events/fs.h>
65033 #include "internal.h"
65034
65035 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65036@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65037 error = locks_verify_truncate(inode, NULL, length);
65038 if (!error)
65039 error = security_path_truncate(path);
65040+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65041+ error = -EACCES;
65042 if (!error)
65043 error = do_truncate(path->dentry, length, 0, NULL);
65044
65045@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65046 error = locks_verify_truncate(inode, f.file, length);
65047 if (!error)
65048 error = security_path_truncate(&f.file->f_path);
65049+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65050+ error = -EACCES;
65051 if (!error)
65052 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65053 sb_end_write(inode->i_sb);
65054@@ -392,6 +398,9 @@ retry:
65055 if (__mnt_is_readonly(path.mnt))
65056 res = -EROFS;
65057
65058+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65059+ res = -EACCES;
65060+
65061 out_path_release:
65062 path_put(&path);
65063 if (retry_estale(res, lookup_flags)) {
65064@@ -423,6 +432,8 @@ retry:
65065 if (error)
65066 goto dput_and_out;
65067
65068+ gr_log_chdir(path.dentry, path.mnt);
65069+
65070 set_fs_pwd(current->fs, &path);
65071
65072 dput_and_out:
65073@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65074 goto out_putf;
65075
65076 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65077+
65078+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65079+ error = -EPERM;
65080+
65081+ if (!error)
65082+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65083+
65084 if (!error)
65085 set_fs_pwd(current->fs, &f.file->f_path);
65086 out_putf:
65087@@ -481,7 +499,13 @@ retry:
65088 if (error)
65089 goto dput_and_out;
65090
65091+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65092+ goto dput_and_out;
65093+
65094 set_fs_root(current->fs, &path);
65095+
65096+ gr_handle_chroot_chdir(&path);
65097+
65098 error = 0;
65099 dput_and_out:
65100 path_put(&path);
65101@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65102 return error;
65103 retry_deleg:
65104 mutex_lock(&inode->i_mutex);
65105+
65106+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65107+ error = -EACCES;
65108+ goto out_unlock;
65109+ }
65110+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65111+ error = -EACCES;
65112+ goto out_unlock;
65113+ }
65114+
65115 error = security_path_chmod(path, mode);
65116 if (error)
65117 goto out_unlock;
65118@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65119 uid = make_kuid(current_user_ns(), user);
65120 gid = make_kgid(current_user_ns(), group);
65121
65122+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65123+ return -EACCES;
65124+
65125 newattrs.ia_valid = ATTR_CTIME;
65126 if (user != (uid_t) -1) {
65127 if (!uid_valid(uid))
65128@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65129 } else {
65130 fsnotify_open(f);
65131 fd_install(fd, f);
65132+ trace_do_sys_open(tmp->name, flags, mode);
65133 }
65134 }
65135 putname(tmp);
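
Every change in fs/open.c above has the same shape: after the normal DAC/LSM checks succeed, a grsecurity hook runs, and on denial the status is overwritten with -EACCES or -EPERM before the operation proceeds. The hooks can only tighten the result, never widen it. A sketch of that gating shape (the hook name is a hypothetical stand-in for the gr_* functions):

#include <stdio.h>

static int policy_allows_chdir(const char *path) { return path[0] != '\0'; }

static int do_chdir_checked(const char *path)
{
    int error = 0;                     /* assume base checks passed */

    if (!error && !policy_allows_chdir(path))
        error = -13;                   /* -EACCES: deny, never allow more */
    if (!error)
        printf("chdir(%s) permitted\n", path);
    return error;
}

int main(void)
{
    return do_chdir_checked("/tmp") ? 1 : 0;
}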
65136diff --git a/fs/pipe.c b/fs/pipe.c
65137index 21981e5..3d5f55c 100644
65138--- a/fs/pipe.c
65139+++ b/fs/pipe.c
65140@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65141
65142 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65143 {
65144- if (pipe->files)
65145+ if (atomic_read(&pipe->files))
65146 mutex_lock_nested(&pipe->mutex, subclass);
65147 }
65148
65149@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65150
65151 void pipe_unlock(struct pipe_inode_info *pipe)
65152 {
65153- if (pipe->files)
65154+ if (atomic_read(&pipe->files))
65155 mutex_unlock(&pipe->mutex);
65156 }
65157 EXPORT_SYMBOL(pipe_unlock);
65158@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65159 }
65160 if (bufs) /* More to do? */
65161 continue;
65162- if (!pipe->writers)
65163+ if (!atomic_read(&pipe->writers))
65164 break;
65165- if (!pipe->waiting_writers) {
65166+ if (!atomic_read(&pipe->waiting_writers)) {
65167 /* syscall merging: Usually we must not sleep
65168 * if O_NONBLOCK is set, or if we got some data.
65169 * But if a writer sleeps in kernel space, then
65170@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65171
65172 __pipe_lock(pipe);
65173
65174- if (!pipe->readers) {
65175+ if (!atomic_read(&pipe->readers)) {
65176 send_sig(SIGPIPE, current, 0);
65177 ret = -EPIPE;
65178 goto out;
65179@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65180 for (;;) {
65181 int bufs;
65182
65183- if (!pipe->readers) {
65184+ if (!atomic_read(&pipe->readers)) {
65185 send_sig(SIGPIPE, current, 0);
65186 if (!ret)
65187 ret = -EPIPE;
65188@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65189 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65190 do_wakeup = 0;
65191 }
65192- pipe->waiting_writers++;
65193+ atomic_inc(&pipe->waiting_writers);
65194 pipe_wait(pipe);
65195- pipe->waiting_writers--;
65196+ atomic_dec(&pipe->waiting_writers);
65197 }
65198 out:
65199 __pipe_unlock(pipe);
65200@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65201 mask = 0;
65202 if (filp->f_mode & FMODE_READ) {
65203 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65204- if (!pipe->writers && filp->f_version != pipe->w_counter)
65205+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65206 mask |= POLLHUP;
65207 }
65208
65209@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65210 * Most Unices do not set POLLERR for FIFOs but on Linux they
65211 * behave exactly like pipes for poll().
65212 */
65213- if (!pipe->readers)
65214+ if (!atomic_read(&pipe->readers))
65215 mask |= POLLERR;
65216 }
65217
65218@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65219 int kill = 0;
65220
65221 spin_lock(&inode->i_lock);
65222- if (!--pipe->files) {
65223+ if (atomic_dec_and_test(&pipe->files)) {
65224 inode->i_pipe = NULL;
65225 kill = 1;
65226 }
65227@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65228
65229 __pipe_lock(pipe);
65230 if (file->f_mode & FMODE_READ)
65231- pipe->readers--;
65232+ atomic_dec(&pipe->readers);
65233 if (file->f_mode & FMODE_WRITE)
65234- pipe->writers--;
65235+ atomic_dec(&pipe->writers);
65236
65237- if (pipe->readers || pipe->writers) {
65238+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65239 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65240 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65241 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65242@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65243 kfree(pipe);
65244 }
65245
65246-static struct vfsmount *pipe_mnt __read_mostly;
65247+struct vfsmount *pipe_mnt __read_mostly;
65248
65249 /*
65250 * pipefs_dname() is called from d_path().
65251@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65252 goto fail_iput;
65253
65254 inode->i_pipe = pipe;
65255- pipe->files = 2;
65256- pipe->readers = pipe->writers = 1;
65257+ atomic_set(&pipe->files, 2);
65258+ atomic_set(&pipe->readers, 1);
65259+ atomic_set(&pipe->writers, 1);
65260 inode->i_fop = &pipefifo_fops;
65261
65262 /*
65263@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65264 spin_lock(&inode->i_lock);
65265 if (inode->i_pipe) {
65266 pipe = inode->i_pipe;
65267- pipe->files++;
65268+ atomic_inc(&pipe->files);
65269 spin_unlock(&inode->i_lock);
65270 } else {
65271 spin_unlock(&inode->i_lock);
65272 pipe = alloc_pipe_info();
65273 if (!pipe)
65274 return -ENOMEM;
65275- pipe->files = 1;
65276+ atomic_set(&pipe->files, 1);
65277 spin_lock(&inode->i_lock);
65278 if (unlikely(inode->i_pipe)) {
65279- inode->i_pipe->files++;
65280+ atomic_inc(&inode->i_pipe->files);
65281 spin_unlock(&inode->i_lock);
65282 free_pipe_info(pipe);
65283 pipe = inode->i_pipe;
65284@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65285 * opened, even when there is no process writing the FIFO.
65286 */
65287 pipe->r_counter++;
65288- if (pipe->readers++ == 0)
65289+ if (atomic_inc_return(&pipe->readers) == 1)
65290 wake_up_partner(pipe);
65291
65292- if (!is_pipe && !pipe->writers) {
65293+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65294 if ((filp->f_flags & O_NONBLOCK)) {
65295 /* suppress POLLHUP until we have
65296 * seen a writer */
65297@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65298 * errno=ENXIO when there is no process reading the FIFO.
65299 */
65300 ret = -ENXIO;
65301- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65302+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65303 goto err;
65304
65305 pipe->w_counter++;
65306- if (!pipe->writers++)
65307+ if (atomic_inc_return(&pipe->writers) == 1)
65308 wake_up_partner(pipe);
65309
65310- if (!is_pipe && !pipe->readers) {
65311+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65312 if (wait_for_partner(pipe, &pipe->r_counter))
65313 goto err_wr;
65314 }
65315@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65316 * the process can at least talk to itself.
65317 */
65318
65319- pipe->readers++;
65320- pipe->writers++;
65321+ atomic_inc(&pipe->readers);
65322+ atomic_inc(&pipe->writers);
65323 pipe->r_counter++;
65324 pipe->w_counter++;
65325- if (pipe->readers == 1 || pipe->writers == 1)
65326+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65327 wake_up_partner(pipe);
65328 break;
65329
65330@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65331 return 0;
65332
65333 err_rd:
65334- if (!--pipe->readers)
65335+ if (atomic_dec_and_test(&pipe->readers))
65336 wake_up_interruptible(&pipe->wait);
65337 ret = -ERESTARTSYS;
65338 goto err;
65339
65340 err_wr:
65341- if (!--pipe->writers)
65342+ if (atomic_dec_and_test(&pipe->writers))
65343 wake_up_interruptible(&pipe->wait);
65344 ret = -ERESTARTSYS;
65345 goto err;
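
The pipe hunks convert the plain readers/writers/files counts to atomic_t mechanically, and the conversion needs off-by-one care: kernel atomic_inc_return() yields the *new* value, so "x++ == 0" becomes "inc_return(&x) == 1" and "!--x" becomes dec_and_test. The same mapping in C11 terms (userspace illustration, not fs/pipe.c):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int readers;

static bool first_reader(void)      /* was: pipe->readers++ == 0 */
{
    return atomic_fetch_add(&readers, 1) + 1 == 1;
}

static bool last_reader_gone(void)  /* was: !--pipe->readers */
{
    return atomic_fetch_sub(&readers, 1) - 1 == 0;
}

int main(void)
{
    printf("first=%d\n", first_reader());
    printf("last=%d\n",  last_reader_gone());
    return 0;
}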
65346diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65347index 0855f77..6787d50 100644
65348--- a/fs/posix_acl.c
65349+++ b/fs/posix_acl.c
65350@@ -20,6 +20,7 @@
65351 #include <linux/xattr.h>
65352 #include <linux/export.h>
65353 #include <linux/user_namespace.h>
65354+#include <linux/grsecurity.h>
65355
65356 struct posix_acl **acl_by_type(struct inode *inode, int type)
65357 {
65358@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65359 }
65360 }
65361 if (mode_p)
65362- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65363+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65364 return not_equiv;
65365 }
65366 EXPORT_SYMBOL(posix_acl_equiv_mode);
65367@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65368 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65369 }
65370
65371- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65372+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65373 return not_equiv;
65374 }
65375
65376@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65377 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65378 int err = -ENOMEM;
65379 if (clone) {
65380+ *mode_p &= ~gr_acl_umask();
65381+
65382 err = posix_acl_create_masq(clone, mode_p);
65383 if (err < 0) {
65384 posix_acl_release(clone);
65385@@ -659,11 +662,12 @@ struct posix_acl *
65386 posix_acl_from_xattr(struct user_namespace *user_ns,
65387 const void *value, size_t size)
65388 {
65389- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65390- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65391+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65392+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65393 int count;
65394 struct posix_acl *acl;
65395 struct posix_acl_entry *acl_e;
65396+ umode_t umask = gr_acl_umask();
65397
65398 if (!value)
65399 return NULL;
65400@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65401
65402 switch(acl_e->e_tag) {
65403 case ACL_USER_OBJ:
65404+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65405+ break;
65406 case ACL_GROUP_OBJ:
65407 case ACL_MASK:
65408+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65409+ break;
65410 case ACL_OTHER:
65411+ acl_e->e_perm &= ~(umask & S_IRWXO);
65412 break;
65413
65414 case ACL_USER:
65415+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65416 acl_e->e_uid =
65417 make_kuid(user_ns,
65418 le32_to_cpu(entry->e_id));
65419@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65420 goto fail;
65421 break;
65422 case ACL_GROUP:
65423+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65424 acl_e->e_gid =
65425 make_kgid(user_ns,
65426 le32_to_cpu(entry->e_id));
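
The posix_acl hunks fold a policy umask into each ACL entry, and the shifts exist because e_perm holds a bare rwx triplet (0..7) while the umask keeps its classes at octal 0700/0070/0007; each class's slice must be shifted down before it can mask the entry. Worked for umask 027:

#include <stdio.h>

int main(void)
{
    unsigned umask = 027;
    unsigned user  = 07 & ~((umask & 0700) >> 6);  /* 7: nothing masked */
    unsigned group = 07 & ~((umask & 0070) >> 3);  /* 5: write masked   */
    unsigned other = 07 & ~ (umask & 0007);        /* 0: all masked     */

    printf("user=%o group=%o other=%o\n", user, group, other);
    return 0;
}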
65427diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65428index 2183fcf..3c32a98 100644
65429--- a/fs/proc/Kconfig
65430+++ b/fs/proc/Kconfig
65431@@ -30,7 +30,7 @@ config PROC_FS
65432
65433 config PROC_KCORE
65434 bool "/proc/kcore support" if !ARM
65435- depends on PROC_FS && MMU
65436+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65437 help
65438 Provides a virtual ELF core file of the live kernel. This can
65439 be read with gdb and other ELF tools. No modifications can be
65440@@ -38,8 +38,8 @@ config PROC_KCORE
65441
65442 config PROC_VMCORE
65443 bool "/proc/vmcore support"
65444- depends on PROC_FS && CRASH_DUMP
65445- default y
65446+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65447+ default n
65448 help
65449 Exports the dump image of crashed kernel in ELF format.
65450
65451@@ -63,8 +63,8 @@ config PROC_SYSCTL
65452 limited in memory.
65453
65454 config PROC_PAGE_MONITOR
65455- default y
65456- depends on PROC_FS && MMU
65457+ default n
65458+ depends on PROC_FS && MMU && !GRKERNSEC
65459 bool "Enable /proc page monitoring" if EXPERT
65460 help
65461 Various /proc files exist to monitor process memory utilization:
65462diff --git a/fs/proc/array.c b/fs/proc/array.c
65463index bd117d0..e6872d7 100644
65464--- a/fs/proc/array.c
65465+++ b/fs/proc/array.c
65466@@ -60,6 +60,7 @@
65467 #include <linux/tty.h>
65468 #include <linux/string.h>
65469 #include <linux/mman.h>
65470+#include <linux/grsecurity.h>
65471 #include <linux/proc_fs.h>
65472 #include <linux/ioport.h>
65473 #include <linux/uaccess.h>
65474@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65475 seq_putc(m, '\n');
65476 }
65477
65478+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65479+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65480+{
65481+ if (p->mm)
65482+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65483+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65484+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65485+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65486+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65487+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65488+ else
65489+ seq_printf(m, "PaX:\t-----\n");
65490+}
65491+#endif
65492+
65493 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65494 struct pid *pid, struct task_struct *task)
65495 {
65496@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65497 task_cpus_allowed(m, task);
65498 cpuset_task_status_allowed(m, task);
65499 task_context_switch_counts(m, task);
65500+
65501+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65502+ task_pax(m, task);
65503+#endif
65504+
65505+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65506+ task_grsec_rbac(m, task);
65507+#endif
65508+
65509 return 0;
65510 }
65511
65512+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65513+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65514+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65515+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65516+#endif
65517+
65518 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65519 struct pid *pid, struct task_struct *task, int whole)
65520 {
65521@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65522 char tcomm[sizeof(task->comm)];
65523 unsigned long flags;
65524
65525+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65526+ if (current->exec_id != m->exec_id) {
65527+ gr_log_badprocpid("stat");
65528+ return 0;
65529+ }
65530+#endif
65531+
65532 state = *get_task_state(task);
65533 vsize = eip = esp = 0;
65534 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65535@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65536 gtime = task_gtime(task);
65537 }
65538
65539+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65540+ if (PAX_RAND_FLAGS(mm)) {
65541+ eip = 0;
65542+ esp = 0;
65543+ wchan = 0;
65544+ }
65545+#endif
65546+#ifdef CONFIG_GRKERNSEC_HIDESYM
65547+ wchan = 0;
65548+ eip =0;
65549+ esp =0;
65550+#endif
65551+
65552 /* scale priority and nice values from timeslices to -20..20 */
65553 /* to make it look like a "normal" Unix priority/nice value */
65554 priority = task_prio(task);
65555@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65556 seq_put_decimal_ull(m, ' ', vsize);
65557 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65558 seq_put_decimal_ull(m, ' ', rsslim);
65559+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65560+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65561+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65562+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65563+#else
65564 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65565 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65566 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65567+#endif
65568 seq_put_decimal_ull(m, ' ', esp);
65569 seq_put_decimal_ull(m, ' ', eip);
65570 /* The signal information here is obsolete.
65571@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65572 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65573 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65574
65575- if (mm && permitted) {
65576+ if (mm && permitted
65577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65578+ && !PAX_RAND_FLAGS(mm)
65579+#endif
65580+ ) {
65581 seq_put_decimal_ull(m, ' ', mm->start_data);
65582 seq_put_decimal_ull(m, ' ', mm->end_data);
65583 seq_put_decimal_ull(m, ' ', mm->start_brk);
65584@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65585 struct pid *pid, struct task_struct *task)
65586 {
65587 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65588- struct mm_struct *mm = get_task_mm(task);
65589+ struct mm_struct *mm;
65590
65591+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65592+ if (current->exec_id != m->exec_id) {
65593+ gr_log_badprocpid("statm");
65594+ return 0;
65595+ }
65596+#endif
65597+ mm = get_task_mm(task);
65598 if (mm) {
65599 size = task_statm(mm, &shared, &text, &data, &resident);
65600 mmput(mm);
65601@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65602 return 0;
65603 }
65604
65605+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65606+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65607+{
65608+ unsigned long flags;
65609+ u32 curr_ip = 0;
65610+
65611+ if (lock_task_sighand(task, &flags)) {
65612+ curr_ip = task->signal->curr_ip;
65613+ unlock_task_sighand(task, &flags);
65614+ }
65615+ return seq_printf(m, "%pI4\n", &curr_ip);
65616+}
65617+#endif
65618+
65619 #ifdef CONFIG_CHECKPOINT_RESTORE
65620 static struct pid *
65621 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
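
task_pax() above renders each mm->pax_flags bit as a letter, uppercase when the feature is enabled, mirroring how the status line reads in /proc/<pid>/status. The rendering convention in isolation (flag values are illustrative placeholders, not the kernel's MF_PAX_* constants):

#include <stdio.h>

#define F_PAGEEXEC (1U << 0)
#define F_EMUTRAMP (1U << 1)
#define F_MPROTECT (1U << 2)
#define F_RANDMMAP (1U << 3)
#define F_SEGMEXEC (1U << 4)

static void print_pax(unsigned flags)
{
    printf("PaX:\t%c%c%c%c%c\n",
           flags & F_PAGEEXEC ? 'P' : 'p',
           flags & F_EMUTRAMP ? 'E' : 'e',
           flags & F_MPROTECT ? 'M' : 'm',
           flags & F_RANDMMAP ? 'R' : 'r',
           flags & F_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
    print_pax(F_PAGEEXEC | F_MPROTECT | F_RANDMMAP);   /* prints PeMRs */
    return 0;
}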
65622diff --git a/fs/proc/base.c b/fs/proc/base.c
65623index 3f3d7ae..68de109 100644
65624--- a/fs/proc/base.c
65625+++ b/fs/proc/base.c
65626@@ -113,6 +113,14 @@ struct pid_entry {
65627 union proc_op op;
65628 };
65629
65630+struct getdents_callback {
65631+ struct linux_dirent __user * current_dir;
65632+ struct linux_dirent __user * previous;
65633+ struct file * file;
65634+ int count;
65635+ int error;
65636+};
65637+
65638 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65639 .name = (NAME), \
65640 .len = sizeof(NAME) - 1, \
65641@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65642 return 0;
65643 }
65644
65645+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65646+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65647+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65648+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65649+#endif
65650+
65651 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65652 struct pid *pid, struct task_struct *task)
65653 {
65654 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65655 if (mm && !IS_ERR(mm)) {
65656 unsigned int nwords = 0;
65657+
65658+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65659+ /* allow if we're currently ptracing this task */
65660+ if (PAX_RAND_FLAGS(mm) &&
65661+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65662+ mmput(mm);
65663+ return 0;
65664+ }
65665+#endif
65666+
65667 do {
65668 nwords += 2;
65669 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65670@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65671 }
65672
65673
65674-#ifdef CONFIG_KALLSYMS
65675+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65676 /*
65677 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65678 * Returns the resolved symbol. If that fails, simply return the address.
65679@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65680 mutex_unlock(&task->signal->cred_guard_mutex);
65681 }
65682
65683-#ifdef CONFIG_STACKTRACE
65684+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65685
65686 #define MAX_STACK_TRACE_DEPTH 64
65687
65688@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65689 return 0;
65690 }
65691
65692-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65693+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65694 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65695 struct pid *pid, struct task_struct *task)
65696 {
65697@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65698 /************************************************************************/
65699
65700 /* permission checks */
65701-static int proc_fd_access_allowed(struct inode *inode)
65702+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65703 {
65704 struct task_struct *task;
65705 int allowed = 0;
65706@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65707 */
65708 task = get_proc_task(inode);
65709 if (task) {
65710- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65711+ if (log)
65712+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65713+ else
65714+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65715 put_task_struct(task);
65716 }
65717 return allowed;
65718@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65719 struct task_struct *task,
65720 int hide_pid_min)
65721 {
65722+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65723+ return false;
65724+
65725+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65726+ rcu_read_lock();
65727+ {
65728+ const struct cred *tmpcred = current_cred();
65729+ const struct cred *cred = __task_cred(task);
65730+
65731+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65732+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65733+ || in_group_p(grsec_proc_gid)
65734+#endif
65735+ ) {
65736+ rcu_read_unlock();
65737+ return true;
65738+ }
65739+ }
65740+ rcu_read_unlock();
65741+
65742+ if (!pid->hide_pid)
65743+ return false;
65744+#endif
65745+
65746 if (pid->hide_pid < hide_pid_min)
65747 return true;
65748 if (in_group_p(pid->pid_gid))
65749 return true;
65750+
65751 return ptrace_may_access(task, PTRACE_MODE_READ);
65752 }
65753
65754@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65755 put_task_struct(task);
65756
65757 if (!has_perms) {
65758+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65759+ {
65760+#else
65761 if (pid->hide_pid == 2) {
65762+#endif
65763 /*
65764 * Let's make getdents(), stat(), and open()
65765 * consistent with each other. If a process
65766@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65767
65768 if (task) {
65769 mm = mm_access(task, mode);
65770+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65771+ mmput(mm);
65772+ mm = ERR_PTR(-EPERM);
65773+ }
65774 put_task_struct(task);
65775
65776 if (!IS_ERR_OR_NULL(mm)) {
65777@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65778 return PTR_ERR(mm);
65779
65780 file->private_data = mm;
65781+
65782+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65783+ file->f_version = current->exec_id;
65784+#endif
65785+
65786 return 0;
65787 }
65788
65789@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65790 ssize_t copied;
65791 char *page;
65792
65793+#ifdef CONFIG_GRKERNSEC
65794+ if (write)
65795+ return -EPERM;
65796+#endif
65797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65798+ if (file->f_version != current->exec_id) {
65799+ gr_log_badprocpid("mem");
65800+ return 0;
65801+ }
65802+#endif
65803+
65804 if (!mm)
65805 return 0;
65806
65807@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65808 goto free;
65809
65810 while (count > 0) {
65811- int this_len = min_t(int, count, PAGE_SIZE);
65812+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65813
65814 if (write && copy_from_user(page, buf, this_len)) {
65815 copied = -EFAULT;
65816@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65817 if (!mm)
65818 return 0;
65819
65820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65821+ if (file->f_version != current->exec_id) {
65822+ gr_log_badprocpid("environ");
65823+ return 0;
65824+ }
65825+#endif
65826+
65827 page = (char *)__get_free_page(GFP_TEMPORARY);
65828 if (!page)
65829 return -ENOMEM;
65830@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65831 goto free;
65832 while (count > 0) {
65833 size_t this_len, max_len;
65834- int retval;
65835+ ssize_t retval;
65836
65837 if (src >= (mm->env_end - mm->env_start))
65838 break;
65839@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65840 int error = -EACCES;
65841
65842 /* Are we allowed to snoop on the tasks file descriptors? */
65843- if (!proc_fd_access_allowed(inode))
65844+ if (!proc_fd_access_allowed(inode, 0))
65845 goto out;
65846
65847 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65848@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65849 struct path path;
65850
65851 /* Are we allowed to snoop on the tasks file descriptors? */
65852- if (!proc_fd_access_allowed(inode))
65853- goto out;
65854+	/* logging this is needed for learning mode on chromium to work properly,
65855+	   but we don't want to flood the logs from 'ps', which does a readlink
65856+	   on /proc/fd/2 of every task in the listing, nor do we want 'ps' to
65857+	   learn CAP_SYS_PTRACE, as it's not necessary for its basic functionality
65858+	*/
65859+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65860+		if (!proc_fd_access_allowed(inode, 0))
65861+ goto out;
65862+ } else {
65863+		if (!proc_fd_access_allowed(inode, 1))
65864+ goto out;
65865+ }
65866
65867 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65868 if (error)
65869@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65870 rcu_read_lock();
65871 cred = __task_cred(task);
65872 inode->i_uid = cred->euid;
65873+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65874+ inode->i_gid = grsec_proc_gid;
65875+#else
65876 inode->i_gid = cred->egid;
65877+#endif
65878 rcu_read_unlock();
65879 }
65880 security_task_to_inode(task, inode);
65881@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65882 return -ENOENT;
65883 }
65884 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65885+#ifdef CONFIG_GRKERNSEC_PROC_USER
65886+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65887+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65888+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65889+#endif
65890 task_dumpable(task)) {
65891 cred = __task_cred(task);
65892 stat->uid = cred->euid;
65893+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65894+ stat->gid = grsec_proc_gid;
65895+#else
65896 stat->gid = cred->egid;
65897+#endif
65898 }
65899 }
65900 rcu_read_unlock();
65901@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65902
65903 if (task) {
65904 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65905+#ifdef CONFIG_GRKERNSEC_PROC_USER
65906+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65907+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65908+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65909+#endif
65910 task_dumpable(task)) {
65911 rcu_read_lock();
65912 cred = __task_cred(task);
65913 inode->i_uid = cred->euid;
65914+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65915+ inode->i_gid = grsec_proc_gid;
65916+#else
65917 inode->i_gid = cred->egid;
65918+#endif
65919 rcu_read_unlock();
65920 } else {
65921 inode->i_uid = GLOBAL_ROOT_UID;
65922@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
65923 if (!task)
65924 goto out_no_task;
65925
65926+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65927+ goto out;
65928+
65929 /*
65930 * Yes, it does not scale. And it should not. Don't add
65931 * new entries into /proc/<tgid>/ without very good reasons.
65932@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
65933 if (!task)
65934 return -ENOENT;
65935
65936+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65937+ goto out;
65938+
65939 if (!dir_emit_dots(file, ctx))
65940 goto out;
65941
65942@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
65943 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
65944 #endif
65945 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65946-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65947+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65948 ONE("syscall", S_IRUSR, proc_pid_syscall),
65949 #endif
65950 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65951@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
65952 #ifdef CONFIG_SECURITY
65953 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65954 #endif
65955-#ifdef CONFIG_KALLSYMS
65956+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65957 ONE("wchan", S_IRUGO, proc_pid_wchan),
65958 #endif
65959-#ifdef CONFIG_STACKTRACE
65960+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65961 ONE("stack", S_IRUSR, proc_pid_stack),
65962 #endif
65963 #ifdef CONFIG_SCHEDSTATS
65964@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
65965 #ifdef CONFIG_HARDWALL
65966 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
65967 #endif
65968+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65969+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
65970+#endif
65971 #ifdef CONFIG_USER_NS
65972 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
65973 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
65974@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
65975 if (!inode)
65976 goto out;
65977
65978+#ifdef CONFIG_GRKERNSEC_PROC_USER
65979+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
65980+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65981+ inode->i_gid = grsec_proc_gid;
65982+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
65983+#else
65984 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
65985+#endif
65986 inode->i_op = &proc_tgid_base_inode_operations;
65987 inode->i_fop = &proc_tgid_base_operations;
65988 inode->i_flags|=S_IMMUTABLE;
65989@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
65990 if (!task)
65991 goto out;
65992
65993+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65994+ goto out_put_task;
65995+
65996 result = proc_pid_instantiate(dir, dentry, task, NULL);
65997+out_put_task:
65998 put_task_struct(task);
65999 out:
66000 return ERR_PTR(result);
66001@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
66002 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66003 #endif
66004 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66005-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66006+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66007 ONE("syscall", S_IRUSR, proc_pid_syscall),
66008 #endif
66009 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66010@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
66011 #ifdef CONFIG_SECURITY
66012 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66013 #endif
66014-#ifdef CONFIG_KALLSYMS
66015+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66016 ONE("wchan", S_IRUGO, proc_pid_wchan),
66017 #endif
66018-#ifdef CONFIG_STACKTRACE
66019+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66020 ONE("stack", S_IRUSR, proc_pid_stack),
66021 #endif
66022 #ifdef CONFIG_SCHEDSTATS
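The fs/proc/base.c hunks above layer GRKERNSEC_PROC_USER / GRKERNSEC_PROC_USERGROUP (plus grsec_proc_gid membership) on top of the stock hide_pid logic, so unprivileged users stop seeing other users' /proc/<pid> directories at the getdents()/lookup level. A minimal userspace probe of the effect, illustrative only and not part of the patch:

/* Count the numeric /proc entries getdents() lets this user see.
 * Under GRKERNSEC_PROC_USER (or hidepid=2), an unprivileged caller
 * should see little beyond its own processes. */
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/proc");
	struct dirent *e;
	unsigned long visible = 0;

	if (!d) {
		perror("opendir /proc");
		return 1;
	}
	while ((e = readdir(d)) != NULL) {
		if (isdigit((unsigned char)e->d_name[0]))
			visible++;	/* a visible PID directory */
	}
	closedir(d);
	printf("visible pid entries: %lu\n", visible);
	return 0;
}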
66023diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66024index cbd82df..c0407d2 100644
66025--- a/fs/proc/cmdline.c
66026+++ b/fs/proc/cmdline.c
66027@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66028
66029 static int __init proc_cmdline_init(void)
66030 {
66031+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66032+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66033+#else
66034 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66035+#endif
66036 return 0;
66037 }
66038 fs_initcall(proc_cmdline_init);
66039diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66040index 50493ed..248166b 100644
66041--- a/fs/proc/devices.c
66042+++ b/fs/proc/devices.c
66043@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66044
66045 static int __init proc_devices_init(void)
66046 {
66047+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66048+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66049+#else
66050 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66051+#endif
66052 return 0;
66053 }
66054 fs_initcall(proc_devices_init);
66055diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66056index 8e5ad83..1f07a8c 100644
66057--- a/fs/proc/fd.c
66058+++ b/fs/proc/fd.c
66059@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66060 if (!task)
66061 return -ENOENT;
66062
66063- files = get_files_struct(task);
66064+ if (!gr_acl_handle_procpidmem(task))
66065+ files = get_files_struct(task);
66066 put_task_struct(task);
66067
66068 if (files) {
66069@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66070 */
66071 int proc_fd_permission(struct inode *inode, int mask)
66072 {
66073+ struct task_struct *task;
66074 int rv = generic_permission(inode, mask);
66075- if (rv == 0)
66076- return 0;
66077+
66078 if (task_tgid(current) == proc_pid(inode))
66079 rv = 0;
66080+
66081+ task = get_proc_task(inode);
66082+ if (task == NULL)
66083+ return rv;
66084+
66085+ if (gr_acl_handle_procpidmem(task))
66086+ rv = -EACCES;
66087+
66088+ put_task_struct(task);
66089+
66090 return rv;
66091 }
66092
66093diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66094index b502bba..849e216 100644
66095--- a/fs/proc/generic.c
66096+++ b/fs/proc/generic.c
66097@@ -22,6 +22,7 @@
66098 #include <linux/bitops.h>
66099 #include <linux/spinlock.h>
66100 #include <linux/completion.h>
66101+#include <linux/grsecurity.h>
66102 #include <asm/uaccess.h>
66103
66104 #include "internal.h"
66105@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66106 return proc_lookup_de(PDE(dir), dir, dentry);
66107 }
66108
66109+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66110+ unsigned int flags)
66111+{
66112+ if (gr_proc_is_restricted())
66113+ return ERR_PTR(-EACCES);
66114+
66115+ return proc_lookup_de(PDE(dir), dir, dentry);
66116+}
66117+
66118 /*
66119 * This returns non-zero if at EOF, so that the /proc
66120 * root directory can use this and check if it should
66121@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66122 return proc_readdir_de(PDE(inode), file, ctx);
66123 }
66124
66125+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66126+{
66127+ struct inode *inode = file_inode(file);
66128+
66129+ if (gr_proc_is_restricted())
66130+ return -EACCES;
66131+
66132+ return proc_readdir_de(PDE(inode), file, ctx);
66133+}
66134+
66135 /*
66136 * These are the generic /proc directory operations. They
66137 * use the in-memory "struct proc_dir_entry" tree to parse
66138@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66139 .iterate = proc_readdir,
66140 };
66141
66142+static const struct file_operations proc_dir_restricted_operations = {
66143+ .llseek = generic_file_llseek,
66144+ .read = generic_read_dir,
66145+ .iterate = proc_readdir_restrict,
66146+};
66147+
66148 /*
66149 * proc directories can do almost nothing..
66150 */
66151@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66152 .setattr = proc_notify_change,
66153 };
66154
66155+static const struct inode_operations proc_dir_restricted_inode_operations = {
66156+ .lookup = proc_lookup_restrict,
66157+ .getattr = proc_getattr,
66158+ .setattr = proc_notify_change,
66159+};
66160+
66161 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66162 {
66163 int ret;
66164@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66165 return ret;
66166
66167 if (S_ISDIR(dp->mode)) {
66168- dp->proc_fops = &proc_dir_operations;
66169- dp->proc_iops = &proc_dir_inode_operations;
66170+ if (dp->restricted) {
66171+ dp->proc_fops = &proc_dir_restricted_operations;
66172+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66173+ } else {
66174+ dp->proc_fops = &proc_dir_operations;
66175+ dp->proc_iops = &proc_dir_inode_operations;
66176+ }
66177 dir->nlink++;
66178 } else if (S_ISLNK(dp->mode)) {
66179 dp->proc_iops = &proc_link_inode_operations;
66180@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66181 }
66182 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66183
66184+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66185+ struct proc_dir_entry *parent, void *data)
66186+{
66187+ struct proc_dir_entry *ent;
66188+
66189+ if (mode == 0)
66190+ mode = S_IRUGO | S_IXUGO;
66191+
66192+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66193+ if (ent) {
66194+ ent->data = data;
66195+ ent->restricted = 1;
66196+ if (proc_register(parent, ent) < 0) {
66197+ kfree(ent);
66198+ ent = NULL;
66199+ }
66200+ }
66201+ return ent;
66202+}
66203+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66204+
66205 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66206 struct proc_dir_entry *parent)
66207 {
66208@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66209 }
66210 EXPORT_SYMBOL(proc_mkdir);
66211
66212+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66213+ struct proc_dir_entry *parent)
66214+{
66215+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66216+}
66217+EXPORT_SYMBOL(proc_mkdir_restrict);
66218+
66219 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66220 struct proc_dir_entry *parent,
66221 const struct file_operations *proc_fops,
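proc_mkdir_data_restrict()/proc_mkdir_restrict() above mirror their stock counterparts but set the new ->restricted flag, so the directory's lookup and readdir paths (proc_lookup_restrict/proc_readdir_restrict) return -EACCES whenever gr_proc_is_restricted() is true. A sketch of a hypothetical caller follows; the function and directory names are invented, and it assumes the declaration is exported next to proc_mkdir() in linux/proc_fs.h elsewhere in this patch:

#include <linux/proc_fs.h>

/* Hypothetical module init: register a proc directory whose listing
 * is denied to restricted processes; creation itself is unchanged. */
static int __init example_init(void)
{
	struct proc_dir_entry *pde;

	pde = proc_mkdir_restrict("example", NULL);
	if (!pde)
		return -ENOMEM;
	return 0;
}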
66222diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66223index 3b0f838..a0e0f63e 100644
66224--- a/fs/proc/inode.c
66225+++ b/fs/proc/inode.c
66226@@ -24,11 +24,17 @@
66227 #include <linux/mount.h>
66228 #include <linux/magic.h>
66229 #include <linux/namei.h>
66230+#include <linux/grsecurity.h>
66231
66232 #include <asm/uaccess.h>
66233
66234 #include "internal.h"
66235
66236+#ifdef CONFIG_PROC_SYSCTL
66237+extern const struct inode_operations proc_sys_inode_operations;
66238+extern const struct inode_operations proc_sys_dir_operations;
66239+#endif
66240+
66241 static void proc_evict_inode(struct inode *inode)
66242 {
66243 struct proc_dir_entry *de;
66244@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66245 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66246 sysctl_head_put(head);
66247 }
66248+
66249+#ifdef CONFIG_PROC_SYSCTL
66250+ if (inode->i_op == &proc_sys_inode_operations ||
66251+ inode->i_op == &proc_sys_dir_operations)
66252+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66253+#endif
66254+
66255 }
66256
66257 static struct kmem_cache * proc_inode_cachep;
66258@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66259 if (de->mode) {
66260 inode->i_mode = de->mode;
66261 inode->i_uid = de->uid;
66262+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66263+ inode->i_gid = grsec_proc_gid;
66264+#else
66265 inode->i_gid = de->gid;
66266+#endif
66267 }
66268 if (de->size)
66269 inode->i_size = de->size;
66270diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66271index c835b94..c9e01a3 100644
66272--- a/fs/proc/internal.h
66273+++ b/fs/proc/internal.h
66274@@ -47,9 +47,10 @@ struct proc_dir_entry {
66275 struct completion *pde_unload_completion;
66276 struct list_head pde_openers; /* who did ->open, but not ->release */
66277 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66278+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66279 u8 namelen;
66280 char name[];
66281-};
66282+} __randomize_layout;
66283
66284 union proc_op {
66285 int (*proc_get_link)(struct dentry *, struct path *);
66286@@ -67,7 +68,7 @@ struct proc_inode {
66287 struct ctl_table *sysctl_entry;
66288 const struct proc_ns_operations *ns_ops;
66289 struct inode vfs_inode;
66290-};
66291+} __randomize_layout;
66292
66293 /*
66294 * General functions
66295@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66296 struct pid *, struct task_struct *);
66297 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66298 struct pid *, struct task_struct *);
66299+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66300+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66301+ struct pid *, struct task_struct *);
66302+#endif
66303
66304 /*
66305 * base.c
66306@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66307 * generic.c
66308 */
66309 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66310+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66311 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66312 struct dentry *);
66313 extern int proc_readdir(struct file *, struct dir_context *);
66314+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66315 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66316
66317 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66318diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66319index a352d57..cb94a5c 100644
66320--- a/fs/proc/interrupts.c
66321+++ b/fs/proc/interrupts.c
66322@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66323
66324 static int __init proc_interrupts_init(void)
66325 {
66326+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66327+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66328+#else
66329 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66330+#endif
66331 return 0;
66332 }
66333 fs_initcall(proc_interrupts_init);
66334diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66335index 91a4e64..cb007c0 100644
66336--- a/fs/proc/kcore.c
66337+++ b/fs/proc/kcore.c
66338@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66339 * the addresses in the elf_phdr on our list.
66340 */
66341 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66342- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66343+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66344+ if (tsz > buflen)
66345 tsz = buflen;
66346-
66347+
66348 while (buflen) {
66349 struct kcore_list *m;
66350
66351@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66352 kfree(elf_buf);
66353 } else {
66354 if (kern_addr_valid(start)) {
66355- unsigned long n;
66356+ char *elf_buf;
66357+ mm_segment_t oldfs;
66358
66359- n = copy_to_user(buffer, (char *)start, tsz);
66360- /*
66361- * We cannot distinguish between fault on source
66362- * and fault on destination. When this happens
66363- * we clear too and hope it will trigger the
66364- * EFAULT again.
66365- */
66366- if (n) {
66367- if (clear_user(buffer + tsz - n,
66368- n))
66369+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66370+ if (!elf_buf)
66371+ return -ENOMEM;
66372+ oldfs = get_fs();
66373+ set_fs(KERNEL_DS);
66374+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66375+ set_fs(oldfs);
66376+ if (copy_to_user(buffer, elf_buf, tsz)) {
66377+ kfree(elf_buf);
66378 return -EFAULT;
66379+ }
66380 }
66381+ set_fs(oldfs);
66382+ kfree(elf_buf);
66383 } else {
66384 if (clear_user(buffer, tsz))
66385 return -EFAULT;
66386@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66387
66388 static int open_kcore(struct inode *inode, struct file *filp)
66389 {
66390+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66391+ return -EPERM;
66392+#endif
66393 if (!capable(CAP_SYS_RAWIO))
66394 return -EPERM;
66395 if (kcore_need_update)
66396diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66397index d3ebf2e..6ad42d1 100644
66398--- a/fs/proc/meminfo.c
66399+++ b/fs/proc/meminfo.c
66400@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66401 vmi.used >> 10,
66402 vmi.largest_chunk >> 10
66403 #ifdef CONFIG_MEMORY_FAILURE
66404- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66405+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66406 #endif
66407 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66408 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66409diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66410index d4a3574..b421ce9 100644
66411--- a/fs/proc/nommu.c
66412+++ b/fs/proc/nommu.c
66413@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66414
66415 if (file) {
66416 seq_pad(m, ' ');
66417- seq_path(m, &file->f_path, "");
66418+ seq_path(m, &file->f_path, "\n\\");
66419 }
66420
66421 seq_putc(m, '\n');
66422diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66423index 1bde894..22ac7eb 100644
66424--- a/fs/proc/proc_net.c
66425+++ b/fs/proc/proc_net.c
66426@@ -23,9 +23,27 @@
66427 #include <linux/nsproxy.h>
66428 #include <net/net_namespace.h>
66429 #include <linux/seq_file.h>
66430+#include <linux/grsecurity.h>
66431
66432 #include "internal.h"
66433
66434+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66435+static struct seq_operations *ipv6_seq_ops_addr;
66436+
66437+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66438+{
66439+ ipv6_seq_ops_addr = addr;
66440+}
66441+
66442+void unregister_ipv6_seq_ops_addr(void)
66443+{
66444+ ipv6_seq_ops_addr = NULL;
66445+}
66446+
66447+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66448+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66449+#endif
66450+
66451 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66452 {
66453 return pde->parent->data;
66454@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66455 return maybe_get_net(PDE_NET(PDE(inode)));
66456 }
66457
66458+extern const struct seq_operations dev_seq_ops;
66459+
66460 int seq_open_net(struct inode *ino, struct file *f,
66461 const struct seq_operations *ops, int size)
66462 {
66463@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66464
66465 BUG_ON(size < sizeof(*p));
66466
66467+	/* only permit access to /proc/net/dev (and, with IPv6, the registered address listing) */
66468+ if (
66469+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66470+ ops != ipv6_seq_ops_addr &&
66471+#endif
66472+ ops != &dev_seq_ops && gr_proc_is_restricted())
66473+ return -EACCES;
66474+
66475 net = get_proc_net(ino);
66476 if (net == NULL)
66477 return -ENXIO;
66478@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66479 int err;
66480 struct net *net;
66481
66482+ if (gr_proc_is_restricted())
66483+ return -EACCES;
66484+
66485 err = -ENXIO;
66486 net = get_proc_net(inode);
66487 if (net == NULL)
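With the seq_open_net()/single_open_net() gates above, a restricted process can still open /proc/net/dev (and, when IPv6 is built, whatever seq_operations were registered through register_ipv6_seq_ops_addr()), while every other /proc/net seq file fails at open time. An illustrative userspace probe, not part of the patch:

/* Expect "/proc/net/dev -> ok" but "Permission denied" for the rest
 * when the GRKERNSEC proc restrictions apply to this process. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void try_open(const char *path)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		printf("%-16s -> %s\n", path, strerror(errno));
	} else {
		printf("%-16s -> ok\n", path);
		close(fd);
	}
}

int main(void)
{
	try_open("/proc/net/dev");
	try_open("/proc/net/tcp");
	return 0;
}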
66488diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66489index f92d5dd..26398ac 100644
66490--- a/fs/proc/proc_sysctl.c
66491+++ b/fs/proc/proc_sysctl.c
66492@@ -11,13 +11,21 @@
66493 #include <linux/namei.h>
66494 #include <linux/mm.h>
66495 #include <linux/module.h>
66496+#include <linux/nsproxy.h>
66497+#ifdef CONFIG_GRKERNSEC
66498+#include <net/net_namespace.h>
66499+#endif
66500 #include "internal.h"
66501
66502+extern int gr_handle_chroot_sysctl(const int op);
66503+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66504+ const int op);
66505+
66506 static const struct dentry_operations proc_sys_dentry_operations;
66507 static const struct file_operations proc_sys_file_operations;
66508-static const struct inode_operations proc_sys_inode_operations;
66509+const struct inode_operations proc_sys_inode_operations;
66510 static const struct file_operations proc_sys_dir_file_operations;
66511-static const struct inode_operations proc_sys_dir_operations;
66512+const struct inode_operations proc_sys_dir_operations;
66513
66514 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66515 {
66516@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66517
66518 err = NULL;
66519 d_set_d_op(dentry, &proc_sys_dentry_operations);
66520+
66521+ gr_handle_proc_create(dentry, inode);
66522+
66523 d_add(dentry, inode);
66524
66525 out:
66526@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66527 struct inode *inode = file_inode(filp);
66528 struct ctl_table_header *head = grab_header(inode);
66529 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66530+ int op = write ? MAY_WRITE : MAY_READ;
66531 ssize_t error;
66532 size_t res;
66533
66534@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66535 * and won't be until we finish.
66536 */
66537 error = -EPERM;
66538- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66539+ if (sysctl_perm(head, table, op))
66540 goto out;
66541
66542 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66543@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66544 if (!table->proc_handler)
66545 goto out;
66546
66547+#ifdef CONFIG_GRKERNSEC
66548+ error = -EPERM;
66549+ if (gr_handle_chroot_sysctl(op))
66550+ goto out;
66551+ dget(filp->f_path.dentry);
66552+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66553+ dput(filp->f_path.dentry);
66554+ goto out;
66555+ }
66556+ dput(filp->f_path.dentry);
66557+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66558+ goto out;
66559+ if (write) {
66560+ if (current->nsproxy->net_ns != table->extra2) {
66561+ if (!capable(CAP_SYS_ADMIN))
66562+ goto out;
66563+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66564+ goto out;
66565+ }
66566+#endif
66567+
66568 /* careful: calling conventions are nasty here */
66569 res = count;
66570 error = table->proc_handler(table, write, buf, &res, ppos);
66571@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66572 return false;
66573 } else {
66574 d_set_d_op(child, &proc_sys_dentry_operations);
66575+
66576+ gr_handle_proc_create(child, inode);
66577+
66578 d_add(child, inode);
66579 }
66580 } else {
66581@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66582 if ((*pos)++ < ctx->pos)
66583 return true;
66584
66585+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66586+ return 0;
66587+
66588 if (unlikely(S_ISLNK(table->mode)))
66589 res = proc_sys_link_fill_cache(file, ctx, head, table);
66590 else
66591@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66592 if (IS_ERR(head))
66593 return PTR_ERR(head);
66594
66595+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66596+ return -ENOENT;
66597+
66598 generic_fillattr(inode, stat);
66599 if (table)
66600 stat->mode = (stat->mode & S_IFMT) | table->mode;
66601@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66602 .llseek = generic_file_llseek,
66603 };
66604
66605-static const struct inode_operations proc_sys_inode_operations = {
66606+const struct inode_operations proc_sys_inode_operations = {
66607 .permission = proc_sys_permission,
66608 .setattr = proc_sys_setattr,
66609 .getattr = proc_sys_getattr,
66610 };
66611
66612-static const struct inode_operations proc_sys_dir_operations = {
66613+const struct inode_operations proc_sys_dir_operations = {
66614 .lookup = proc_sys_lookup,
66615 .permission = proc_sys_permission,
66616 .setattr = proc_sys_setattr,
66617@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66618 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66619 const char *name, int namelen)
66620 {
66621- struct ctl_table *table;
66622+ ctl_table_no_const *table;
66623 struct ctl_dir *new;
66624 struct ctl_node *node;
66625 char *new_name;
66626@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66627 return NULL;
66628
66629 node = (struct ctl_node *)(new + 1);
66630- table = (struct ctl_table *)(node + 1);
66631+ table = (ctl_table_no_const *)(node + 1);
66632 new_name = (char *)(table + 2);
66633 memcpy(new_name, name, namelen);
66634 new_name[namelen] = '\0';
66635@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66636 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66637 struct ctl_table_root *link_root)
66638 {
66639- struct ctl_table *link_table, *entry, *link;
66640+ ctl_table_no_const *link_table, *link;
66641+ struct ctl_table *entry;
66642 struct ctl_table_header *links;
66643 struct ctl_node *node;
66644 char *link_name;
66645@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66646 return NULL;
66647
66648 node = (struct ctl_node *)(links + 1);
66649- link_table = (struct ctl_table *)(node + nr_entries);
66650+ link_table = (ctl_table_no_const *)(node + nr_entries);
66651 link_name = (char *)&link_table[nr_entries + 1];
66652
66653 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66654@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66655 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66656 struct ctl_table *table)
66657 {
66658- struct ctl_table *ctl_table_arg = NULL;
66659- struct ctl_table *entry, *files;
66660+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66661+ struct ctl_table *entry;
66662 int nr_files = 0;
66663 int nr_dirs = 0;
66664 int err = -ENOMEM;
66665@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66666 nr_files++;
66667 }
66668
66669- files = table;
66670 /* If there are mixed files and directories we need a new table */
66671 if (nr_dirs && nr_files) {
66672- struct ctl_table *new;
66673+ ctl_table_no_const *new;
66674 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66675 GFP_KERNEL);
66676 if (!files)
66677@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66678 /* Register everything except a directory full of subdirectories */
66679 if (nr_files || !nr_dirs) {
66680 struct ctl_table_header *header;
66681- header = __register_sysctl_table(set, path, files);
66682+ header = __register_sysctl_table(set, path, files ? files : table);
66683 if (!header) {
66684 kfree(ctl_table_arg);
66685 goto out;
66686diff --git a/fs/proc/root.c b/fs/proc/root.c
66687index e74ac9f..35e89f4 100644
66688--- a/fs/proc/root.c
66689+++ b/fs/proc/root.c
66690@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66691 proc_mkdir("openprom", NULL);
66692 #endif
66693 proc_tty_init();
66694+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66695+#ifdef CONFIG_GRKERNSEC_PROC_USER
66696+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66697+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66698+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66699+#endif
66700+#else
66701 proc_mkdir("bus", NULL);
66702+#endif
66703 proc_sys_init();
66704 }
66705
66706diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66707index 510413eb..34d9a8c 100644
66708--- a/fs/proc/stat.c
66709+++ b/fs/proc/stat.c
66710@@ -11,6 +11,7 @@
66711 #include <linux/irqnr.h>
66712 #include <linux/cputime.h>
66713 #include <linux/tick.h>
66714+#include <linux/grsecurity.h>
66715
66716 #ifndef arch_irq_stat_cpu
66717 #define arch_irq_stat_cpu(cpu) 0
66718@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66719 u64 sum_softirq = 0;
66720 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66721 struct timespec boottime;
66722+ int unrestricted = 1;
66723+
66724+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66725+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66726+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66727+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66728+ && !in_group_p(grsec_proc_gid)
66729+#endif
66730+ )
66731+ unrestricted = 0;
66732+#endif
66733+#endif
66734
66735 user = nice = system = idle = iowait =
66736 irq = softirq = steal = 0;
66737@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66738 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66739 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66740 idle += get_idle_time(i);
66741- iowait += get_iowait_time(i);
66742- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66743- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66744- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66745- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66746- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66747- sum += kstat_cpu_irqs_sum(i);
66748- sum += arch_irq_stat_cpu(i);
66749+ if (unrestricted) {
66750+ iowait += get_iowait_time(i);
66751+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66752+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66753+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66754+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66755+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66756+ sum += kstat_cpu_irqs_sum(i);
66757+ sum += arch_irq_stat_cpu(i);
66758+ for (j = 0; j < NR_SOFTIRQS; j++) {
66759+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66760
66761- for (j = 0; j < NR_SOFTIRQS; j++) {
66762- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66763-
66764- per_softirq_sums[j] += softirq_stat;
66765- sum_softirq += softirq_stat;
66766+ per_softirq_sums[j] += softirq_stat;
66767+ sum_softirq += softirq_stat;
66768+ }
66769 }
66770 }
66771- sum += arch_irq_stat();
66772+ if (unrestricted)
66773+ sum += arch_irq_stat();
66774
66775 seq_puts(p, "cpu ");
66776 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66777@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66778 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66779 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66780 idle = get_idle_time(i);
66781- iowait = get_iowait_time(i);
66782- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66783- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66784- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66785- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66786- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66787+ if (unrestricted) {
66788+ iowait = get_iowait_time(i);
66789+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66790+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66791+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66792+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66793+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66794+ }
66795 seq_printf(p, "cpu%d", i);
66796 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66797 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66798@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66799
66800 /* sum again ? it could be updated? */
66801 for_each_irq_nr(j)
66802- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66803+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66804
66805 seq_printf(p,
66806 "\nctxt %llu\n"
66807@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66808 "processes %lu\n"
66809 "procs_running %lu\n"
66810 "procs_blocked %lu\n",
66811- nr_context_switches(),
66812+ unrestricted ? nr_context_switches() : 0ULL,
66813 (unsigned long)jif,
66814- total_forks,
66815- nr_running(),
66816- nr_iowait());
66817+ unrestricted ? total_forks : 0UL,
66818+ unrestricted ? nr_running() : 0UL,
66819+ unrestricted ? nr_iowait() : 0UL);
66820
66821 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66822
66823diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66824index f86e549..3a88fcd 100644
66825--- a/fs/proc/task_mmu.c
66826+++ b/fs/proc/task_mmu.c
66827@@ -13,12 +13,19 @@
66828 #include <linux/swap.h>
66829 #include <linux/swapops.h>
66830 #include <linux/mmu_notifier.h>
66831+#include <linux/grsecurity.h>
66832
66833 #include <asm/elf.h>
66834 #include <asm/uaccess.h>
66835 #include <asm/tlbflush.h>
66836 #include "internal.h"
66837
66838+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66839+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66840+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66841+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66842+#endif
66843+
66844 void task_mem(struct seq_file *m, struct mm_struct *mm)
66845 {
66846 unsigned long data, text, lib, swap;
66847@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66848 "VmExe:\t%8lu kB\n"
66849 "VmLib:\t%8lu kB\n"
66850 "VmPTE:\t%8lu kB\n"
66851- "VmSwap:\t%8lu kB\n",
66852- hiwater_vm << (PAGE_SHIFT-10),
66853+ "VmSwap:\t%8lu kB\n"
66854+
66855+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66856+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66857+#endif
66858+
66859+ ,hiwater_vm << (PAGE_SHIFT-10),
66860 total_vm << (PAGE_SHIFT-10),
66861 mm->locked_vm << (PAGE_SHIFT-10),
66862 mm->pinned_vm << (PAGE_SHIFT-10),
66863@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66864 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66865 (PTRS_PER_PTE * sizeof(pte_t) *
66866 atomic_long_read(&mm->nr_ptes)) >> 10,
66867- swap << (PAGE_SHIFT-10));
66868+ swap << (PAGE_SHIFT-10)
66869+
66870+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66871+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66872+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66873+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66874+#else
66875+ , mm->context.user_cs_base
66876+ , mm->context.user_cs_limit
66877+#endif
66878+#endif
66879+
66880+ );
66881 }
66882
66883 unsigned long task_vsize(struct mm_struct *mm)
66884@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66885 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66886 }
66887
66888- /* We don't show the stack guard page in /proc/maps */
66889+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66890+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66891+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66892+#else
66893 start = vma->vm_start;
66894- if (stack_guard_page_start(vma, start))
66895- start += PAGE_SIZE;
66896 end = vma->vm_end;
66897- if (stack_guard_page_end(vma, end))
66898- end -= PAGE_SIZE;
66899+#endif
66900
66901 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66902 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66903@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66904 flags & VM_WRITE ? 'w' : '-',
66905 flags & VM_EXEC ? 'x' : '-',
66906 flags & VM_MAYSHARE ? 's' : 'p',
66907+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66908+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66909+#else
66910 pgoff,
66911+#endif
66912 MAJOR(dev), MINOR(dev), ino);
66913
66914 /*
66915@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66916 */
66917 if (file) {
66918 seq_pad(m, ' ');
66919- seq_path(m, &file->f_path, "\n");
66920+ seq_path(m, &file->f_path, "\n\\");
66921 goto done;
66922 }
66923
66924@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66925 * Thread stack in /proc/PID/task/TID/maps or
66926 * the main process stack.
66927 */
66928- if (!is_pid || (vma->vm_start <= mm->start_stack &&
66929- vma->vm_end >= mm->start_stack)) {
66930+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
66931+ (vma->vm_start <= mm->start_stack &&
66932+ vma->vm_end >= mm->start_stack)) {
66933 name = "[stack]";
66934 } else {
66935 /* Thread stack in /proc/PID/maps */
66936@@ -359,6 +388,12 @@ done:
66937
66938 static int show_map(struct seq_file *m, void *v, int is_pid)
66939 {
66940+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66941+ if (current->exec_id != m->exec_id) {
66942+ gr_log_badprocpid("maps");
66943+ return 0;
66944+ }
66945+#endif
66946 show_map_vma(m, v, is_pid);
66947 m_cache_vma(m, v);
66948 return 0;
66949@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66950 .private = &mss,
66951 };
66952
66953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66954+ if (current->exec_id != m->exec_id) {
66955+ gr_log_badprocpid("smaps");
66956+ return 0;
66957+ }
66958+#endif
66959 memset(&mss, 0, sizeof mss);
66960- mss.vma = vma;
66961- /* mmap_sem is held in m_start */
66962- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66963- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66964-
66965+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66966+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
66967+#endif
66968+ mss.vma = vma;
66969+ /* mmap_sem is held in m_start */
66970+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66971+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66972+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66973+ }
66974+#endif
66975 show_map_vma(m, vma, is_pid);
66976
66977 seq_printf(m,
66978@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66979 "KernelPageSize: %8lu kB\n"
66980 "MMUPageSize: %8lu kB\n"
66981 "Locked: %8lu kB\n",
66982+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66983+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
66984+#else
66985 (vma->vm_end - vma->vm_start) >> 10,
66986+#endif
66987 mss.resident >> 10,
66988 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
66989 mss.shared_clean >> 10,
66990@@ -1489,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66991 char buffer[64];
66992 int nid;
66993
66994+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66995+ if (current->exec_id != m->exec_id) {
66996+ gr_log_badprocpid("numa_maps");
66997+ return 0;
66998+ }
66999+#endif
67000+
67001 if (!mm)
67002 return 0;
67003
67004@@ -1510,11 +1567,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67005 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
67006 }
67007
67008+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67009+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67010+#else
67011 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67012+#endif
67013
67014 if (file) {
67015 seq_puts(m, " file=");
67016- seq_path(m, &file->f_path, "\n\t= ");
67017+ seq_path(m, &file->f_path, "\n\t\\= ");
67018 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67019 seq_puts(m, " heap");
67020 } else {
67021diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67022index 599ec2e..f1413ae 100644
67023--- a/fs/proc/task_nommu.c
67024+++ b/fs/proc/task_nommu.c
67025@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67026 else
67027 bytes += kobjsize(mm);
67028
67029- if (current->fs && current->fs->users > 1)
67030+ if (current->fs && atomic_read(&current->fs->users) > 1)
67031 sbytes += kobjsize(current->fs);
67032 else
67033 bytes += kobjsize(current->fs);
67034@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67035
67036 if (file) {
67037 seq_pad(m, ' ');
67038- seq_path(m, &file->f_path, "");
67039+ seq_path(m, &file->f_path, "\n\\");
67040 } else if (mm) {
67041 pid_t tid = pid_of_stack(priv, vma, is_pid);
67042
67043diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67044index a90d6d35..d08047c 100644
67045--- a/fs/proc/vmcore.c
67046+++ b/fs/proc/vmcore.c
67047@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67048 nr_bytes = count;
67049
67050 /* If pfn is not ram, return zeros for sparse dump files */
67051- if (pfn_is_ram(pfn) == 0)
67052- memset(buf, 0, nr_bytes);
67053- else {
67054+ if (pfn_is_ram(pfn) == 0) {
67055+ if (userbuf) {
67056+ if (clear_user((char __force_user *)buf, nr_bytes))
67057+ return -EFAULT;
67058+ } else
67059+ memset(buf, 0, nr_bytes);
67060+ } else {
67061 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67062 offset, userbuf);
67063 if (tmp < 0)
67064@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67065 static int copy_to(void *target, void *src, size_t size, int userbuf)
67066 {
67067 if (userbuf) {
67068- if (copy_to_user((char __user *) target, src, size))
67069+ if (copy_to_user((char __force_user *) target, src, size))
67070 return -EFAULT;
67071 } else {
67072 memcpy(target, src, size);
67073@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67074 if (*fpos < m->offset + m->size) {
67075 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67076 start = m->paddr + *fpos - m->offset;
67077- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67078+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67079 if (tmp < 0)
67080 return tmp;
67081 buflen -= tsz;
67082@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67083 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67084 size_t buflen, loff_t *fpos)
67085 {
67086- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67087+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67088 }
67089
67090 /*
67091diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67092index d3fb2b6..43a8140 100644
67093--- a/fs/qnx6/qnx6.h
67094+++ b/fs/qnx6/qnx6.h
67095@@ -74,7 +74,7 @@ enum {
67096 BYTESEX_BE,
67097 };
67098
67099-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67100+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67101 {
67102 if (sbi->s_bytesex == BYTESEX_LE)
67103 return le64_to_cpu((__force __le64)n);
67104@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67105 return (__force __fs64)cpu_to_be64(n);
67106 }
67107
67108-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67109+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67110 {
67111 if (sbi->s_bytesex == BYTESEX_LE)
67112 return le32_to_cpu((__force __le32)n);
67113diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67114index bb2869f..d34ada8 100644
67115--- a/fs/quota/netlink.c
67116+++ b/fs/quota/netlink.c
67117@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67118 void quota_send_warning(struct kqid qid, dev_t dev,
67119 const char warntype)
67120 {
67121- static atomic_t seq;
67122+ static atomic_unchecked_t seq;
67123 struct sk_buff *skb;
67124 void *msg_head;
67125 int ret;
67126@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67127 "VFS: Not enough memory to send quota warning.\n");
67128 return;
67129 }
67130- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67131+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67132 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67133 if (!msg_head) {
67134 printk(KERN_ERR
67135diff --git a/fs/read_write.c b/fs/read_write.c
67136index c0805c93..d39f2eb 100644
67137--- a/fs/read_write.c
67138+++ b/fs/read_write.c
67139@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67140
67141 old_fs = get_fs();
67142 set_fs(get_ds());
67143- p = (__force const char __user *)buf;
67144+ p = (const char __force_user *)buf;
67145 if (count > MAX_RW_COUNT)
67146 count = MAX_RW_COUNT;
67147 if (file->f_op->write)
67148diff --git a/fs/readdir.c b/fs/readdir.c
67149index ced6791..936687b 100644
67150--- a/fs/readdir.c
67151+++ b/fs/readdir.c
67152@@ -18,6 +18,7 @@
67153 #include <linux/security.h>
67154 #include <linux/syscalls.h>
67155 #include <linux/unistd.h>
67156+#include <linux/namei.h>
67157
67158 #include <asm/uaccess.h>
67159
67160@@ -71,6 +72,7 @@ struct old_linux_dirent {
67161 struct readdir_callback {
67162 struct dir_context ctx;
67163 struct old_linux_dirent __user * dirent;
67164+ struct file * file;
67165 int result;
67166 };
67167
67168@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67169 buf->result = -EOVERFLOW;
67170 return -EOVERFLOW;
67171 }
67172+
67173+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67174+ return 0;
67175+
67176 buf->result++;
67177 dirent = buf->dirent;
67178 if (!access_ok(VERIFY_WRITE, dirent,
67179@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67180 if (!f.file)
67181 return -EBADF;
67182
67183+ buf.file = f.file;
67184 error = iterate_dir(f.file, &buf.ctx);
67185 if (buf.result)
67186 error = buf.result;
67187@@ -145,6 +152,7 @@ struct getdents_callback {
67188 struct dir_context ctx;
67189 struct linux_dirent __user * current_dir;
67190 struct linux_dirent __user * previous;
67191+ struct file * file;
67192 int count;
67193 int error;
67194 };
67195@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67196 buf->error = -EOVERFLOW;
67197 return -EOVERFLOW;
67198 }
67199+
67200+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67201+ return 0;
67202+
67203 dirent = buf->previous;
67204 if (dirent) {
67205 if (__put_user(offset, &dirent->d_off))
67206@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67207 if (!f.file)
67208 return -EBADF;
67209
67210+ buf.file = f.file;
67211 error = iterate_dir(f.file, &buf.ctx);
67212 if (error >= 0)
67213 error = buf.error;
67214@@ -230,6 +243,7 @@ struct getdents_callback64 {
67215 struct dir_context ctx;
67216 struct linux_dirent64 __user * current_dir;
67217 struct linux_dirent64 __user * previous;
67218+ struct file *file;
67219 int count;
67220 int error;
67221 };
67222@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67223 buf->error = -EINVAL; /* only used if we fail.. */
67224 if (reclen > buf->count)
67225 return -EINVAL;
67226+
67227+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67228+ return 0;
67229+
67230 dirent = buf->previous;
67231 if (dirent) {
67232 if (__put_user(offset, &dirent->d_off))
67233@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67234 if (!f.file)
67235 return -EBADF;
67236
67237+ buf.file = f.file;
67238 error = iterate_dir(f.file, &buf.ctx);
67239 if (error >= 0)
67240 error = buf.error;
67241diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67242index 9c02d96..6562c10 100644
67243--- a/fs/reiserfs/do_balan.c
67244+++ b/fs/reiserfs/do_balan.c
67245@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67246 return;
67247 }
67248
67249- atomic_inc(&fs_generation(tb->tb_sb));
67250+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67251 do_balance_starts(tb);
67252
67253 /*
67254diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67255index aca73dd..e3c558d 100644
67256--- a/fs/reiserfs/item_ops.c
67257+++ b/fs/reiserfs/item_ops.c
67258@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67259 }
67260
67261 static struct item_operations errcatch_ops = {
67262- errcatch_bytes_number,
67263- errcatch_decrement_key,
67264- errcatch_is_left_mergeable,
67265- errcatch_print_item,
67266- errcatch_check_item,
67267+ .bytes_number = errcatch_bytes_number,
67268+ .decrement_key = errcatch_decrement_key,
67269+ .is_left_mergeable = errcatch_is_left_mergeable,
67270+ .print_item = errcatch_print_item,
67271+ .check_item = errcatch_check_item,
67272
67273- errcatch_create_vi,
67274- errcatch_check_left,
67275- errcatch_check_right,
67276- errcatch_part_size,
67277- errcatch_unit_num,
67278- errcatch_print_vi
67279+ .create_vi = errcatch_create_vi,
67280+ .check_left = errcatch_check_left,
67281+ .check_right = errcatch_check_right,
67282+ .part_size = errcatch_part_size,
67283+ .unit_num = errcatch_unit_num,
67284+ .print_vi = errcatch_print_vi
67285 };
67286
67287 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67288diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67289index 621b9f3..af527fd 100644
67290--- a/fs/reiserfs/procfs.c
67291+++ b/fs/reiserfs/procfs.c
67292@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67293 "SMALL_TAILS " : "NO_TAILS ",
67294 replay_only(sb) ? "REPLAY_ONLY " : "",
67295 convert_reiserfs(sb) ? "CONV " : "",
67296- atomic_read(&r->s_generation_counter),
67297+ atomic_read_unchecked(&r->s_generation_counter),
67298 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67299 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67300 SF(s_good_search_by_key_reada), SF(s_bmaps),
67301diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67302index bb79cdd..fcf49ef 100644
67303--- a/fs/reiserfs/reiserfs.h
67304+++ b/fs/reiserfs/reiserfs.h
67305@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67306 /* Comment? -Hans */
67307 wait_queue_head_t s_wait;
67308 /* increased by one every time the tree gets re-balanced */
67309- atomic_t s_generation_counter;
67310+ atomic_unchecked_t s_generation_counter;
67311
67312 /* File system properties. Currently holds on-disk FS format */
67313 unsigned long s_properties;
67314@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67315 #define REISERFS_USER_MEM 1 /* user memory mode */
67316
67317 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67318-#define get_generation(s) atomic_read (&fs_generation(s))
67319+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67320 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67321 #define __fs_changed(gen,s) (gen != get_generation (s))
67322 #define fs_changed(gen,s) \
67323diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67324index 71fbbe3..eff29ba 100644
67325--- a/fs/reiserfs/super.c
67326+++ b/fs/reiserfs/super.c
67327@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67328 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67329 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67330 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67331+#ifdef CONFIG_REISERFS_FS_XATTR
67332+ /* turn on user xattrs by default */
67333+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67334+#endif
67335 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67336 sbi->s_alloc_options.preallocmin = 0;
67337 /* Preallocate by 16 blocks (17-1) at once */
67338diff --git a/fs/select.c b/fs/select.c
67339index 467bb1c..cf9d65a 100644
67340--- a/fs/select.c
67341+++ b/fs/select.c
67342@@ -20,6 +20,7 @@
67343 #include <linux/export.h>
67344 #include <linux/slab.h>
67345 #include <linux/poll.h>
67346+#include <linux/security.h>
67347 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67348 #include <linux/file.h>
67349 #include <linux/fdtable.h>
67350@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67351 struct poll_list *walk = head;
67352 unsigned long todo = nfds;
67353
67354+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67355 if (nfds > rlimit(RLIMIT_NOFILE))
67356 return -EINVAL;
67357
67358diff --git a/fs/seq_file.c b/fs/seq_file.c
67359index dbf3a59..daf023f 100644
67360--- a/fs/seq_file.c
67361+++ b/fs/seq_file.c
67362@@ -12,6 +12,8 @@
67363 #include <linux/slab.h>
67364 #include <linux/cred.h>
67365 #include <linux/mm.h>
67366+#include <linux/sched.h>
67367+#include <linux/grsecurity.h>
67368
67369 #include <asm/uaccess.h>
67370 #include <asm/page.h>
67371@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67372
67373 static void *seq_buf_alloc(unsigned long size)
67374 {
67375- void *buf;
67376-
67377- /*
67378- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67379- * it's better to fall back to vmalloc() than to kill things.
67380- */
67381- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67382- if (!buf && size > PAGE_SIZE)
67383- buf = vmalloc(size);
67384- return buf;
67385+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67386 }
67387
67388 /**
67389@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67390 #ifdef CONFIG_USER_NS
67391 p->user_ns = file->f_cred->user_ns;
67392 #endif
67393+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67394+ p->exec_id = current->exec_id;
67395+#endif
67396
67397 /*
67398 * Wrappers around seq_open(e.g. swaps_open) need to be
67399@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67400 }
67401 EXPORT_SYMBOL(seq_open);
67402
67403+
67404+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67405+{
67406+ if (gr_proc_is_restricted())
67407+ return -EACCES;
67408+
67409+ return seq_open(file, op);
67410+}
67411+EXPORT_SYMBOL(seq_open_restrict);
67412+
67413 static int traverse(struct seq_file *m, loff_t offset)
67414 {
67415 loff_t pos = 0, index;
67416@@ -158,7 +164,7 @@ Eoverflow:
67417 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67418 {
67419 struct seq_file *m = file->private_data;
67420- size_t copied = 0;
67421+ ssize_t copied = 0;
67422 loff_t pos;
67423 size_t n;
67424 void *p;
67425@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67426 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67427 void *data)
67428 {
67429- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67430+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67431 int res = -ENOMEM;
67432
67433 if (op) {
67434@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67435 }
67436 EXPORT_SYMBOL(single_open_size);
67437
67438+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67439+ void *data)
67440+{
67441+ if (gr_proc_is_restricted())
67442+ return -EACCES;
67443+
67444+ return single_open(file, show, data);
67445+}
67446+EXPORT_SYMBOL(single_open_restrict);
67447+
67448+
67449 int single_release(struct inode *inode, struct file *file)
67450 {
67451 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
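The seq_file.c hunks above add restricted open variants that fail with -EACCES when grsecurity's /proc restrictions apply to the opening task. A sketch of how a proc entry might use them; my_proc_show and my_proc_open are hypothetical names, not part of the patch:

#include <linux/seq_file.h>
#include <linux/proc_fs.h>

static int my_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example output\n");
	return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
	/* denied with -EACCES for tasks covered by the /proc restrictions */
	return single_open_restrict(file, my_proc_show, NULL);
}
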
67452diff --git a/fs/splice.c b/fs/splice.c
67453index 75c6058..770d40c 100644
67454--- a/fs/splice.c
67455+++ b/fs/splice.c
67456@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67457 pipe_lock(pipe);
67458
67459 for (;;) {
67460- if (!pipe->readers) {
67461+ if (!atomic_read(&pipe->readers)) {
67462 send_sig(SIGPIPE, current, 0);
67463 if (!ret)
67464 ret = -EPIPE;
67465@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67466 page_nr++;
67467 ret += buf->len;
67468
67469- if (pipe->files)
67470+ if (atomic_read(&pipe->files))
67471 do_wakeup = 1;
67472
67473 if (!--spd->nr_pages)
67474@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67475 do_wakeup = 0;
67476 }
67477
67478- pipe->waiting_writers++;
67479+ atomic_inc(&pipe->waiting_writers);
67480 pipe_wait(pipe);
67481- pipe->waiting_writers--;
67482+ atomic_dec(&pipe->waiting_writers);
67483 }
67484
67485 pipe_unlock(pipe);
67486@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67487 old_fs = get_fs();
67488 set_fs(get_ds());
67489 /* The cast to a user pointer is valid due to the set_fs() */
67490- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67491+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67492 set_fs(old_fs);
67493
67494 return res;
67495@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67496 old_fs = get_fs();
67497 set_fs(get_ds());
67498 /* The cast to a user pointer is valid due to the set_fs() */
67499- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67500+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67501 set_fs(old_fs);
67502
67503 return res;
67504@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67505 goto err;
67506
67507 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67508- vec[i].iov_base = (void __user *) page_address(page);
67509+ vec[i].iov_base = (void __force_user *) page_address(page);
67510 vec[i].iov_len = this_len;
67511 spd.pages[i] = page;
67512 spd.nr_pages++;
67513@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67514 ops->release(pipe, buf);
67515 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67516 pipe->nrbufs--;
67517- if (pipe->files)
67518+ if (atomic_read(&pipe->files))
67519 sd->need_wakeup = true;
67520 }
67521
67522@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67523 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67524 {
67525 while (!pipe->nrbufs) {
67526- if (!pipe->writers)
67527+ if (!atomic_read(&pipe->writers))
67528 return 0;
67529
67530- if (!pipe->waiting_writers && sd->num_spliced)
67531+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67532 return 0;
67533
67534 if (sd->flags & SPLICE_F_NONBLOCK)
67535@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67536 ops->release(pipe, buf);
67537 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67538 pipe->nrbufs--;
67539- if (pipe->files)
67540+ if (atomic_read(&pipe->files))
67541 sd.need_wakeup = true;
67542 } else {
67543 buf->offset += ret;
67544@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67545 * out of the pipe right after the splice_to_pipe(). So set
67546 * PIPE_READERS appropriately.
67547 */
67548- pipe->readers = 1;
67549+ atomic_set(&pipe->readers, 1);
67550
67551 current->splice_pipe = pipe;
67552 }
67553@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67554
67555 partial[buffers].offset = off;
67556 partial[buffers].len = plen;
67557+ partial[buffers].private = 0;
67558
67559 off = 0;
67560 len -= plen;
67561@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67562 ret = -ERESTARTSYS;
67563 break;
67564 }
67565- if (!pipe->writers)
67566+ if (!atomic_read(&pipe->writers))
67567 break;
67568- if (!pipe->waiting_writers) {
67569+ if (!atomic_read(&pipe->waiting_writers)) {
67570 if (flags & SPLICE_F_NONBLOCK) {
67571 ret = -EAGAIN;
67572 break;
67573@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67574 pipe_lock(pipe);
67575
67576 while (pipe->nrbufs >= pipe->buffers) {
67577- if (!pipe->readers) {
67578+ if (!atomic_read(&pipe->readers)) {
67579 send_sig(SIGPIPE, current, 0);
67580 ret = -EPIPE;
67581 break;
67582@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67583 ret = -ERESTARTSYS;
67584 break;
67585 }
67586- pipe->waiting_writers++;
67587+ atomic_inc(&pipe->waiting_writers);
67588 pipe_wait(pipe);
67589- pipe->waiting_writers--;
67590+ atomic_dec(&pipe->waiting_writers);
67591 }
67592
67593 pipe_unlock(pipe);
67594@@ -1818,14 +1819,14 @@ retry:
67595 pipe_double_lock(ipipe, opipe);
67596
67597 do {
67598- if (!opipe->readers) {
67599+ if (!atomic_read(&opipe->readers)) {
67600 send_sig(SIGPIPE, current, 0);
67601 if (!ret)
67602 ret = -EPIPE;
67603 break;
67604 }
67605
67606- if (!ipipe->nrbufs && !ipipe->writers)
67607+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67608 break;
67609
67610 /*
67611@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67612 pipe_double_lock(ipipe, opipe);
67613
67614 do {
67615- if (!opipe->readers) {
67616+ if (!atomic_read(&opipe->readers)) {
67617 send_sig(SIGPIPE, current, 0);
67618 if (!ret)
67619 ret = -EPIPE;
67620@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67621 * return EAGAIN if we have the potential of some data in the
67622 * future, otherwise just return 0
67623 */
67624- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67625+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67626 ret = -EAGAIN;
67627
67628 pipe_unlock(ipipe);
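The splice.c hunks convert pipe->readers, writers, waiting_writers and files from plain integers to atomic types, making the updates indivisible and, under PaX's REFCOUNT protection, subject to overflow checking. A minimal C11 illustration of why plain ++/-- on a shared counter is unsafe (userspace, illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static int        plain_waiters;  /* ++/-- from two threads is a data race */
static atomic_int atomic_waiters; /* fetch-add is a single indivisible op  */

static void enter_wait(void)
{
	plain_waiters++;                      /* load, add, store: racy    */
	atomic_fetch_add(&atomic_waiters, 1); /* atomic, like atomic_inc() */
}

static void leave_wait(void)
{
	plain_waiters--;
	atomic_fetch_sub(&atomic_waiters, 1);
}

int main(void)
{
	enter_wait();
	leave_wait();
	printf("plain=%d atomic=%d\n", plain_waiters,
	       atomic_load(&atomic_waiters));
	return 0;
}
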
67629diff --git a/fs/stat.c b/fs/stat.c
67630index ae0c3ce..9ee641c 100644
67631--- a/fs/stat.c
67632+++ b/fs/stat.c
67633@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67634 stat->gid = inode->i_gid;
67635 stat->rdev = inode->i_rdev;
67636 stat->size = i_size_read(inode);
67637- stat->atime = inode->i_atime;
67638- stat->mtime = inode->i_mtime;
67639+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67640+ stat->atime = inode->i_ctime;
67641+ stat->mtime = inode->i_ctime;
67642+ } else {
67643+ stat->atime = inode->i_atime;
67644+ stat->mtime = inode->i_mtime;
67645+ }
67646 stat->ctime = inode->i_ctime;
67647 stat->blksize = (1 << inode->i_blkbits);
67648 stat->blocks = inode->i_blocks;
67649@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67650 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67651 {
67652 struct inode *inode = path->dentry->d_inode;
67653+ int retval;
67654
67655- if (inode->i_op->getattr)
67656- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67657+ if (inode->i_op->getattr) {
67658+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67659+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67660+ stat->atime = stat->ctime;
67661+ stat->mtime = stat->ctime;
67662+ }
67663+ return retval;
67664+ }
67665
67666 generic_fillattr(inode, stat);
67667 return 0;
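The stat.c changes implement the GRKERNSEC_DEVICE_SIDECHANNEL behavior described later in the Kconfig: for callers without CAP_MKNOD, a stat() on a device node reports the create time in place of access and modify times. A userspace probe to observe the effect (sketch):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/dev/ptmx", &st) != 0) {
		perror("stat");
		return 1;
	}
	/* on a stock kernel atime/mtime track pty activity; with the
	 * patch, an unprivileged caller sees ctime in both fields */
	printf("atime=%ld mtime=%ld ctime=%ld\n",
	       (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
	return 0;
}
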
67668diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67669index 0b45ff4..edf9d3a 100644
67670--- a/fs/sysfs/dir.c
67671+++ b/fs/sysfs/dir.c
67672@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67673 kfree(buf);
67674 }
67675
67676+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67677+extern int grsec_enable_sysfs_restrict;
67678+#endif
67679+
67680 /**
67681 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
67682 * @kobj: object we're creating directory for
67683@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67684 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67685 {
67686 struct kernfs_node *parent, *kn;
67687+ const char *name;
67688+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67689+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67690+ const char *parent_name;
67691+#endif
67692
67693 BUG_ON(!kobj);
67694
67695+ name = kobject_name(kobj);
67696+
67697 if (kobj->parent)
67698 parent = kobj->parent->sd;
67699 else
67700@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67701 if (!parent)
67702 return -ENOENT;
67703
67704- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67705- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67706+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67707+ parent_name = parent->name;
67708+ mode = S_IRWXU;
67709+
67710+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67711+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67712+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67713+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67714+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67715+ if (!grsec_enable_sysfs_restrict)
67716+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67717+#endif
67718+
67719+ kn = kernfs_create_dir_ns(parent, name,
67720+ mode, kobj, ns);
67721 if (IS_ERR(kn)) {
67722 if (PTR_ERR(kn) == -EEXIST)
67723- sysfs_warn_dup(parent, kobject_name(kobj));
67724+ sysfs_warn_dup(parent, name);
67725 return PTR_ERR(kn);
67726 }
67727
67728diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67729index 69d4889..a810bd4 100644
67730--- a/fs/sysv/sysv.h
67731+++ b/fs/sysv/sysv.h
67732@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67733 #endif
67734 }
67735
67736-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67737+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67738 {
67739 if (sbi->s_bytesex == BYTESEX_PDP)
67740 return PDP_swab((__force __u32)n);
67741diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67742index fb08b0c..65fcc7e 100644
67743--- a/fs/ubifs/io.c
67744+++ b/fs/ubifs/io.c
67745@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67746 return err;
67747 }
67748
67749-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67750+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67751 {
67752 int err;
67753
67754diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67755index c175b4d..8f36a16 100644
67756--- a/fs/udf/misc.c
67757+++ b/fs/udf/misc.c
67758@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67759
67760 u8 udf_tag_checksum(const struct tag *t)
67761 {
67762- u8 *data = (u8 *)t;
67763+ const u8 *data = (const u8 *)t;
67764 u8 checksum = 0;
67765 int i;
67766 for (i = 0; i < sizeof(struct tag); ++i)
67767diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67768index 8d974c4..b82f6ec 100644
67769--- a/fs/ufs/swab.h
67770+++ b/fs/ufs/swab.h
67771@@ -22,7 +22,7 @@ enum {
67772 BYTESEX_BE
67773 };
67774
67775-static inline u64
67776+static inline u64 __intentional_overflow(-1)
67777 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67778 {
67779 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67780@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67781 return (__force __fs64)cpu_to_be64(n);
67782 }
67783
67784-static inline u32
67785+static inline u32 __intentional_overflow(-1)
67786 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67787 {
67788 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
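The __intentional_overflow(-1) annotations on these byte-swap and division helpers tell the size_overflow gcc plugin that wrap-around in their arithmetic is expected and should not be instrumented. A sketch of the shape of such an annotation; the stub macro is only so the example compiles without the plugin:

/* stub so the sketch compiles without the size_overflow plugin headers */
#ifndef __intentional_overflow
#define __intentional_overflow(...)
#endif

#include <stdio.h>

static inline unsigned int __intentional_overflow(-1)
wrapping_sum(unsigned int a, unsigned int b)
{
	return a + b; /* may wrap modulo 2^32; flagged as intentional */
}

int main(void)
{
	printf("%u\n", wrapping_sum(0xffffffffu, 2u)); /* prints 1 */
	return 0;
}
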
67789diff --git a/fs/utimes.c b/fs/utimes.c
67790index aa138d6..5f3a811 100644
67791--- a/fs/utimes.c
67792+++ b/fs/utimes.c
67793@@ -1,6 +1,7 @@
67794 #include <linux/compiler.h>
67795 #include <linux/file.h>
67796 #include <linux/fs.h>
67797+#include <linux/security.h>
67798 #include <linux/linkage.h>
67799 #include <linux/mount.h>
67800 #include <linux/namei.h>
67801@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67802 }
67803 }
67804 retry_deleg:
67805+
67806+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67807+ error = -EACCES;
67808+ goto mnt_drop_write_and_out;
67809+ }
67810+
67811 mutex_lock(&inode->i_mutex);
67812 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67813 mutex_unlock(&inode->i_mutex);
67814diff --git a/fs/xattr.c b/fs/xattr.c
67815index 4ef6985..a6cd6567 100644
67816--- a/fs/xattr.c
67817+++ b/fs/xattr.c
67818@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67819 return rc;
67820 }
67821
67822+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67823+ssize_t
67824+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67825+{
67826+ struct inode *inode = dentry->d_inode;
67827+ ssize_t error;
67828+
67829+ error = inode_permission(inode, MAY_EXEC);
67830+ if (error)
67831+ return error;
67832+
67833+ if (inode->i_op->getxattr)
67834+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67835+ else
67836+ error = -EOPNOTSUPP;
67837+
67838+ return error;
67839+}
67840+EXPORT_SYMBOL(pax_getxattr);
67841+#endif
67842+
67843 ssize_t
67844 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67845 {
67846@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67847 * Extended attribute SET operations
67848 */
67849 static long
67850-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67851+setxattr(struct path *path, const char __user *name, const void __user *value,
67852 size_t size, int flags)
67853 {
67854 int error;
67855@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67856 posix_acl_fix_xattr_from_user(kvalue, size);
67857 }
67858
67859- error = vfs_setxattr(d, kname, kvalue, size, flags);
67860+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67861+ error = -EACCES;
67862+ goto out;
67863+ }
67864+
67865+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67866 out:
67867 if (vvalue)
67868 vfree(vvalue);
67869@@ -376,7 +402,7 @@ retry:
67870 return error;
67871 error = mnt_want_write(path.mnt);
67872 if (!error) {
67873- error = setxattr(path.dentry, name, value, size, flags);
67874+ error = setxattr(&path, name, value, size, flags);
67875 mnt_drop_write(path.mnt);
67876 }
67877 path_put(&path);
67878@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67879 audit_file(f.file);
67880 error = mnt_want_write_file(f.file);
67881 if (!error) {
67882- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67883+ error = setxattr(&f.file->f_path, name, value, size, flags);
67884 mnt_drop_write_file(f.file);
67885 }
67886 fdput(f);
67887@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67888 * Extended attribute REMOVE operations
67889 */
67890 static long
67891-removexattr(struct dentry *d, const char __user *name)
67892+removexattr(struct path *path, const char __user *name)
67893 {
67894 int error;
67895 char kname[XATTR_NAME_MAX + 1];
67896@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67897 if (error < 0)
67898 return error;
67899
67900- return vfs_removexattr(d, kname);
67901+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67902+ return -EACCES;
67903+
67904+ return vfs_removexattr(path->dentry, kname);
67905 }
67906
67907 static int path_removexattr(const char __user *pathname,
67908@@ -623,7 +652,7 @@ retry:
67909 return error;
67910 error = mnt_want_write(path.mnt);
67911 if (!error) {
67912- error = removexattr(path.dentry, name);
67913+ error = removexattr(&path, name);
67914 mnt_drop_write(path.mnt);
67915 }
67916 path_put(&path);
67917@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67918 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67919 {
67920 struct fd f = fdget(fd);
67921+ struct path *path;
67922 int error = -EBADF;
67923
67924 if (!f.file)
67925 return error;
67926+ path = &f.file->f_path;
67927 audit_file(f.file);
67928 error = mnt_want_write_file(f.file);
67929 if (!error) {
67930- error = removexattr(f.file->f_path.dentry, name);
67931+ error = removexattr(path, name);
67932 mnt_drop_write_file(f.file);
67933 }
67934 fdput(f);
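The new pax_getxattr() helper above serves a dedicated PaX flags attribute. Assuming the conventional name "user.pax.flags" for XATTR_NAME_PAX_FLAGS, a userspace sketch that reads the marking from a file:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char buf[32];
	ssize_t n;

	if (argc < 2)
		return 1;
	n = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("getxattr");
		return 1;
	}
	buf[n] = '\0';
	printf("PaX flags on %s: %s\n", argv[1], buf);
	return 0;
}
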
67935diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
67936index 4e20fe7..6d1a55a 100644
67937--- a/fs/xfs/libxfs/xfs_bmap.c
67938+++ b/fs/xfs/libxfs/xfs_bmap.c
67939@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
67940
67941 #else
67942 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
67943-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
67944+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
67945 #endif /* DEBUG */
67946
67947 /*
67948diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
67949index 098cd78..724d3f8 100644
67950--- a/fs/xfs/xfs_dir2_readdir.c
67951+++ b/fs/xfs/xfs_dir2_readdir.c
67952@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
67953 ino = dp->d_ops->sf_get_ino(sfp, sfep);
67954 filetype = dp->d_ops->sf_get_ftype(sfep);
67955 ctx->pos = off & 0x7fffffff;
67956- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67957+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
67958+ char name[sfep->namelen];
67959+ memcpy(name, sfep->name, sfep->namelen);
67960+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
67961+ return 0;
67962+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67963 xfs_dir3_get_dtype(dp->i_mount, filetype)))
67964 return 0;
67965 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
67966diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
67967index a183198..6b52f52 100644
67968--- a/fs/xfs/xfs_ioctl.c
67969+++ b/fs/xfs/xfs_ioctl.c
67970@@ -119,7 +119,7 @@ xfs_find_handle(
67971 }
67972
67973 error = -EFAULT;
67974- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
67975+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
67976 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
67977 goto out_put;
67978
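The xfs_find_handle() change refuses to copy more than sizeof(handle) bytes to userspace, closing a potential kernel stack infoleak. The idiom in a hedged userspace sketch; struct handle and copy_handle here are illustrative:

#include <stdio.h>
#include <string.h>

struct handle { char data[16]; };

/* copy at most hsize bytes of a stack object out; reject oversized
 * requests instead of reading past the object, as the patch does */
static int copy_handle(char *dst, size_t hsize)
{
	struct handle h = { "example" };

	if (hsize > sizeof(h))
		return -1;
	memcpy(dst, &h, hsize);
	return 0;
}

int main(void)
{
	char out[64];

	printf("ok=%d\n", copy_handle(out, sizeof(struct handle)));
	printf("rejected=%d\n", copy_handle(out, sizeof(out)));
	return 0;
}
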
67979diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
67980index c31d2c2..6ec8f62 100644
67981--- a/fs/xfs/xfs_linux.h
67982+++ b/fs/xfs/xfs_linux.h
67983@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
67984 * of the compiler which do not like us using do_div in the middle
67985 * of large functions.
67986 */
67987-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67988+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67989 {
67990 __u32 mod;
67991
67992@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
67993 return 0;
67994 }
67995 #else
67996-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67997+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67998 {
67999 __u32 mod;
68000
68001diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68002new file mode 100644
68003index 0000000..31f8fe4
68004--- /dev/null
68005+++ b/grsecurity/Kconfig
68006@@ -0,0 +1,1182 @@
68007+#
68008+# grsecurity configuration
68009+#
68010+menu "Memory Protections"
68011+depends on GRKERNSEC
68012+
68013+config GRKERNSEC_KMEM
68014+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68015+ default y if GRKERNSEC_CONFIG_AUTO
68016+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68017+ help
68018+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
68019+	  written to or read from, preventing their use to modify or leak the
68020+	  contents of the running kernel. /dev/port will also not be allowed to
68021+	  be opened, writing to /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68022+ If you have module support disabled, enabling this will close up several
68023+ ways that are currently used to insert malicious code into the running
68024+ kernel.
68025+
68026+ Even with this feature enabled, we still highly recommend that
68027+ you use the RBAC system, as it is still possible for an attacker to
68028+ modify the running kernel through other more obscure methods.
68029+
68030+ It is highly recommended that you say Y here if you meet all the
68031+ conditions above.
68032+
68033+config GRKERNSEC_VM86
68034+ bool "Restrict VM86 mode"
68035+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68036+ depends on X86_32
68037+
68038+ help
68039+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68040+	  make use of a special execution mode on 32-bit x86 processors called
68041+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68042+ video cards and will still work with this option enabled. The purpose
68043+ of the option is to prevent exploitation of emulation errors in
68044+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68045+ Nearly all users should be able to enable this option.
68046+
68047+config GRKERNSEC_IO
68048+ bool "Disable privileged I/O"
68049+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68050+ depends on X86
68051+ select RTC_CLASS
68052+ select RTC_INTF_DEV
68053+ select RTC_DRV_CMOS
68054+
68055+ help
68056+ If you say Y here, all ioperm and iopl calls will return an error.
68057+ Ioperm and iopl can be used to modify the running kernel.
68058+ Unfortunately, some programs need this access to operate properly,
68059+	  the most notable of which are XFree86 and hwclock. The hwclock case
68060+	  can be remedied by building RTC support into the kernel, so real-time
68061+	  clock support is enabled whenever this option is, to ensure
68062+	  that hwclock operates correctly.
68063+ either update udev or symlink /dev/rtc to /dev/rtc0.
68064+
68065+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68066+ you may not be able to boot into a graphical environment with this
68067+ option enabled. In this case, you should use the RBAC system instead.
68068+
68069+config GRKERNSEC_BPF_HARDEN
68070+ bool "Harden BPF interpreter"
68071+ default y if GRKERNSEC_CONFIG_AUTO
68072+ help
68073+ Unlike previous versions of grsecurity that hardened both the BPF
68074+ interpreted code against corruption at rest as well as the JIT code
68075+ against JIT-spray attacks and attacker-controlled immediate values
68076+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68077+ and will ensure the interpreted code is read-only at rest. This feature
68078+ may be removed at a later time when eBPF stabilizes to entirely revert
68079+ back to the more secure pre-3.16 BPF interpreter/JIT.
68080+
68081+ If you're using KERNEXEC, it's recommended that you enable this option
68082+ to supplement the hardening of the kernel.
68083+
68084+config GRKERNSEC_PERF_HARDEN
68085+ bool "Disable unprivileged PERF_EVENTS usage by default"
68086+ default y if GRKERNSEC_CONFIG_AUTO
68087+ depends on PERF_EVENTS
68088+ help
68089+ If you say Y here, the range of acceptable values for the
68090+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68091+ default to a new value: 3. When the sysctl is set to this value, no
68092+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68093+
68094+ Though PERF_EVENTS can be used legitimately for performance monitoring
68095+ and low-level application profiling, it is forced on regardless of
68096+ configuration, has been at fault for several vulnerabilities, and
68097+ creates new opportunities for side channels and other information leaks.
68098+
68099+ This feature puts PERF_EVENTS into a secure default state and permits
68100+ the administrator to change out of it temporarily if unprivileged
68101+ application profiling is needed.
68102+
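A small probe for the hardened default described above (sketch; the value 3 is the setting this option documents):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &level) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("perf_event_paranoid = %d%s\n", level,
	       level >= 3 ? " (unprivileged PERF_EVENTS denied)" : "");
	return 0;
}
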
68103+config GRKERNSEC_RAND_THREADSTACK
68104+ bool "Insert random gaps between thread stacks"
68105+ default y if GRKERNSEC_CONFIG_AUTO
68106+ depends on PAX_RANDMMAP && !PPC
68107+ help
68108+ If you say Y here, a random-sized gap will be enforced between allocated
68109+ thread stacks. Glibc's NPTL and other threading libraries that
68110+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68111+ The implementation currently provides 8 bits of entropy for the gap.
68112+
68113+ Many distributions do not compile threaded remote services with the
68114+ -fstack-check argument to GCC, causing the variable-sized stack-based
68115+ allocator, alloca(), to not probe the stack on allocation. This
68116+ permits an unbounded alloca() to skip over any guard page and potentially
68117+ modify another thread's stack reliably. An enforced random gap
68118+ reduces the reliability of such an attack and increases the chance
68119+ that such a read/write to another thread's stack instead lands in
68120+ an unmapped area, causing a crash and triggering grsecurity's
68121+ anti-bruteforcing logic.
68122+
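For reference, thread stacks reach the kernel as MAP_STACK mappings, which is the hint this feature keys off when inserting the random gap. A userspace sketch of such an allocation:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t sz = 8 * 1024 * 1024; /* a typical thread stack size */
	void *stk = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	if (stk == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* with this feature, successive stacks are separated by a random
	 * gap drawing on 8 bits of entropy */
	printf("thread stack at %p\n", stk);
	munmap(stk, sz);
	return 0;
}
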
68123+config GRKERNSEC_PROC_MEMMAP
68124+ bool "Harden ASLR against information leaks and entropy reduction"
68125+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68126+ depends on PAX_NOEXEC || PAX_ASLR
68127+ help
68128+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68129+	  give no information about the addresses of the task's mappings if
68130+ PaX features that rely on random addresses are enabled on the task.
68131+ In addition to sanitizing this information and disabling other
68132+	  dangerous sources of information, this option causes reads of sensitive
68133+	  /proc/<pid> entries to be denied where the file descriptor was opened in
68134+	  a different task than the one performing the read. Such attempts are logged.
68135+ This option also limits argv/env strings for suid/sgid binaries
68136+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68137+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68138+ binaries to prevent alternative mmap layouts from being abused.
68139+
68140+ If you use PaX it is essential that you say Y here as it closes up
68141+ several holes that make full ASLR useless locally.
68142+
68143+
68144+config GRKERNSEC_KSTACKOVERFLOW
68145+ bool "Prevent kernel stack overflows"
68146+ default y if GRKERNSEC_CONFIG_AUTO
68147+ depends on !IA64 && 64BIT
68148+ help
68149+ If you say Y here, the kernel's process stacks will be allocated
68150+ with vmalloc instead of the kernel's default allocator. This
68151+ introduces guard pages that in combination with the alloca checking
68152+ of the STACKLEAK feature prevents all forms of kernel process stack
68153+ overflow abuse. Note that this is different from kernel stack
68154+ buffer overflows.
68155+
68156+config GRKERNSEC_BRUTE
68157+ bool "Deter exploit bruteforcing"
68158+ default y if GRKERNSEC_CONFIG_AUTO
68159+ help
68160+ If you say Y here, attempts to bruteforce exploits against forking
68161+ daemons such as apache or sshd, as well as against suid/sgid binaries
68162+ will be deterred. When a child of a forking daemon is killed by PaX
68163+ or crashes due to an illegal instruction or other suspicious signal,
68164+ the parent process will be delayed 30 seconds upon every subsequent
68165+ fork until the administrator is able to assess the situation and
68166+ restart the daemon.
68167+ In the suid/sgid case, the attempt is logged, the user has all their
68168+ existing instances of the suid/sgid binary terminated and will
68169+ be unable to execute any suid/sgid binaries for 15 minutes.
68170+
68171+ It is recommended that you also enable signal logging in the auditing
68172+ section so that logs are generated when a process triggers a suspicious
68173+ signal.
68174+ If the sysctl option is enabled, a sysctl option with name
68175+ "deter_bruteforce" is created.
68176+
68177+config GRKERNSEC_MODHARDEN
68178+ bool "Harden module auto-loading"
68179+ default y if GRKERNSEC_CONFIG_AUTO
68180+ depends on MODULES
68181+ help
68182+ If you say Y here, module auto-loading in response to use of some
68183+ feature implemented by an unloaded module will be restricted to
68184+ root users. Enabling this option helps defend against attacks
68185+ by unprivileged users who abuse the auto-loading behavior to
68186+ cause a vulnerable module to load that is then exploited.
68187+
68188+ If this option prevents a legitimate use of auto-loading for a
68189+ non-root user, the administrator can execute modprobe manually
68190+ with the exact name of the module mentioned in the alert log.
68191+ Alternatively, the administrator can add the module to the list
68192+ of modules loaded at boot by modifying init scripts.
68193+
68194+ Modification of init scripts will most likely be needed on
68195+ Ubuntu servers with encrypted home directory support enabled,
68196+ as the first non-root user logging in will cause the ecb(aes),
68197+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68198+
68199+config GRKERNSEC_HIDESYM
68200+ bool "Hide kernel symbols"
68201+ default y if GRKERNSEC_CONFIG_AUTO
68202+ select PAX_USERCOPY_SLABS
68203+ help
68204+	  If you say Y here, getting information on loaded modules and
68205+ displaying all kernel symbols through a syscall will be restricted
68206+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68207+ /proc/kallsyms will be restricted to the root user. The RBAC
68208+ system can hide that entry even from root.
68209+
68210+ This option also prevents leaking of kernel addresses through
68211+ several /proc entries.
68212+
68213+ Note that this option is only effective provided the following
68214+ conditions are met:
68215+ 1) The kernel using grsecurity is not precompiled by some distribution
68216+ 2) You have also enabled GRKERNSEC_DMESG
68217+ 3) You are using the RBAC system and hiding other files such as your
68218+ kernel image and System.map. Alternatively, enabling this option
68219+ causes the permissions on /boot, /lib/modules, and the kernel
68220+ source directory to change at compile time to prevent
68221+ reading by non-root users.
68222+ If the above conditions are met, this option will aid in providing a
68223+ useful protection against local kernel exploitation of overflows
68224+ and arbitrary read/write vulnerabilities.
68225+
68226+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68227+ in addition to this feature.
68228+
68229+config GRKERNSEC_RANDSTRUCT
68230+ bool "Randomize layout of sensitive kernel structures"
68231+ default y if GRKERNSEC_CONFIG_AUTO
68232+ select GRKERNSEC_HIDESYM
68233+ select MODVERSIONS if MODULES
68234+ help
68235+ If you say Y here, the layouts of a number of sensitive kernel
68236+ structures (task, fs, cred, etc) and all structures composed entirely
68237+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68238+ This can introduce the requirement of an additional infoleak
68239+ vulnerability for exploits targeting these structure types.
68240+
68241+ Enabling this feature will introduce some performance impact, slightly
68242+ increase memory usage, and prevent the use of forensic tools like
68243+	  Volatility against the system (unless the kernel source tree is
68244+	  left uncleaned after kernel installation).
68245+
68246+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68247+ It remains after a make clean to allow for external modules to be compiled
68248+ with the existing seed and will be removed by a make mrproper or
68249+ make distclean.
68250+
68251+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
68252+ to install the supporting headers explicitly in addition to the normal
68253+ gcc package.
68254+
68255+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68256+ bool "Use cacheline-aware structure randomization"
68257+ depends on GRKERNSEC_RANDSTRUCT
68258+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68259+ help
68260+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68261+ at restricting randomization to cacheline-sized groups of elements. It
68262+ will further not randomize bitfields in structures. This reduces the
68263+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68264+
68265+config GRKERNSEC_KERN_LOCKOUT
68266+ bool "Active kernel exploit response"
68267+ default y if GRKERNSEC_CONFIG_AUTO
68268+ depends on X86 || ARM || PPC || SPARC
68269+ help
68270+ If you say Y here, when a PaX alert is triggered due to suspicious
68271+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68272+ or an OOPS occurs due to bad memory accesses, instead of just
68273+ terminating the offending process (and potentially allowing
68274+ a subsequent exploit from the same user), we will take one of two
68275+ actions:
68276+ If the user was root, we will panic the system
68277+ If the user was non-root, we will log the attempt, terminate
68278+ all processes owned by the user, then prevent them from creating
68279+ any new processes until the system is restarted
68280+ This deters repeated kernel exploitation/bruteforcing attempts
68281+ and is useful for later forensics.
68282+
68283+config GRKERNSEC_OLD_ARM_USERLAND
68284+ bool "Old ARM userland compatibility"
68285+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68286+ help
68287+ If you say Y here, stubs of executable code to perform such operations
68288+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68289+ table. This is unfortunately needed for old ARM userland meant to run
68290+ across a wide range of processors. Without this option enabled,
68291+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68292+ which is enough for Linaro userlands or other userlands designed for v6
68293+ and newer ARM CPUs. It's recommended that you try without this option enabled
68294+ first, and only enable it if your userland does not boot (it will likely fail
68295+ at init time).
68296+
68297+endmenu
68298+menu "Role Based Access Control Options"
68299+depends on GRKERNSEC
68300+
68301+config GRKERNSEC_RBAC_DEBUG
68302+ bool
68303+
68304+config GRKERNSEC_NO_RBAC
68305+ bool "Disable RBAC system"
68306+ help
68307+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68308+ preventing the RBAC system from being enabled. You should only say Y
68309+ here if you have no intention of using the RBAC system, so as to prevent
68310+ an attacker with root access from misusing the RBAC system to hide files
68311+ and processes when loadable module support and /dev/[k]mem have been
68312+ locked down.
68313+
68314+config GRKERNSEC_ACL_HIDEKERN
68315+ bool "Hide kernel processes"
68316+ help
68317+ If you say Y here, all kernel threads will be hidden to all
68318+ processes but those whose subject has the "view hidden processes"
68319+ flag.
68320+
68321+config GRKERNSEC_ACL_MAXTRIES
68322+ int "Maximum tries before password lockout"
68323+ default 3
68324+ help
68325+ This option enforces the maximum number of times a user can attempt
68326+ to authorize themselves with the grsecurity RBAC system before being
68327+ denied the ability to attempt authorization again for a specified time.
68328+ The lower the number, the harder it will be to brute-force a password.
68329+
68330+config GRKERNSEC_ACL_TIMEOUT
68331+ int "Time to wait after max password tries, in seconds"
68332+ default 30
68333+ help
68334+ This option specifies the time the user must wait after attempting to
68335+ authorize to the RBAC system with the maximum number of invalid
68336+ passwords. The higher the number, the harder it will be to brute-force
68337+ a password.
68338+
68339+endmenu
68340+menu "Filesystem Protections"
68341+depends on GRKERNSEC
68342+
68343+config GRKERNSEC_PROC
68344+ bool "Proc restrictions"
68345+ default y if GRKERNSEC_CONFIG_AUTO
68346+ help
68347+ If you say Y here, the permissions of the /proc filesystem
68348+ will be altered to enhance system security and privacy. You MUST
68349+ choose either a user only restriction or a user and group restriction.
68350+	  Depending upon the option you choose, you can either restrict users to
68351+	  see only the processes they themselves run, or designate a group whose
68352+	  members can view all processes and files normally restricted to
68353+	  root. NOTE: If you're running identd or
68354+ ntpd as a non-root user, you will have to run it as the group you
68355+ specify here.
68356+
68357+config GRKERNSEC_PROC_USER
68358+ bool "Restrict /proc to user only"
68359+ depends on GRKERNSEC_PROC
68360+ help
68361+ If you say Y here, non-root users will only be able to view their own
68362+	  processes, and will be restricted from viewing network-related
68363+	  information and kernel symbol and module information.
68364+
68365+config GRKERNSEC_PROC_USERGROUP
68366+ bool "Allow special group"
68367+ default y if GRKERNSEC_CONFIG_AUTO
68368+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68369+ help
68370+ If you say Y here, you will be able to select a group that will be
68371+ able to view all processes and network-related information. If you've
68372+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68373+ remain hidden. This option is useful if you want to run identd as
68374+ a non-root user. The group you select may also be chosen at boot time
68375+ via "grsec_proc_gid=" on the kernel commandline.
68376+
68377+config GRKERNSEC_PROC_GID
68378+ int "GID for special group"
68379+ depends on GRKERNSEC_PROC_USERGROUP
68380+ default 1001
68381+
68382+config GRKERNSEC_PROC_ADD
68383+ bool "Additional restrictions"
68384+ default y if GRKERNSEC_CONFIG_AUTO
68385+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68386+ help
68387+ If you say Y here, additional restrictions will be placed on
68388+ /proc that keep normal users from viewing device information and
68389+ slabinfo information that could be useful for exploits.
68390+
68391+config GRKERNSEC_LINK
68392+ bool "Linking restrictions"
68393+ default y if GRKERNSEC_CONFIG_AUTO
68394+ help
68395+ If you say Y here, /tmp race exploits will be prevented, since users
68396+ will no longer be able to follow symlinks owned by other users in
68397+ world-writable +t directories (e.g. /tmp), unless the owner of the
68398+	  symlink is the owner of the directory. Users will also not be
68399+ able to hardlink to files they do not own. If the sysctl option is
68400+ enabled, a sysctl option with name "linking_restrictions" is created.
68401+
68402+config GRKERNSEC_SYMLINKOWN
68403+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68404+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68405+ help
68406+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68407+ that prevents it from being used as a security feature. As Apache
68408+ verifies the symlink by performing a stat() against the target of
68409+	  the symlink before it is followed, an attacker can set up a symlink
68410+ to point to a same-owned file, then replace the symlink with one
68411+ that targets another user's file just after Apache "validates" the
68412+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68413+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68414+ will be in place for the group you specify. If the sysctl option
68415+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68416+ created.
68417+
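The TOCTOU race described above exists because the ownership check and the follow are separate steps on a path. The race-free userspace equivalent checks the already-opened descriptor instead; open_if_owner_matches below is a hypothetical helper, offered as a sketch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int open_if_owner_matches(const char *path, uid_t want)
{
	struct stat st;
	int fd = open(path, O_RDONLY | O_NOFOLLOW); /* refuse symlinks */

	if (fd < 0)
		return -1;
	/* fstat() the open fd: the object can no longer be swapped */
	if (fstat(fd, &st) != 0 || st.st_uid != want) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2)
		return 1;
	fd = open_if_owner_matches(argv[1], getuid());
	printf("%s\n", fd >= 0 ? "owner matches" : "denied");
	if (fd >= 0)
		close(fd);
	return 0;
}
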
68418+config GRKERNSEC_SYMLINKOWN_GID
68419+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68420+ depends on GRKERNSEC_SYMLINKOWN
68421+ default 1006
68422+ help
68423+ Setting this GID determines what group kernel-enforced
68424+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68425+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68426+
68427+config GRKERNSEC_FIFO
68428+ bool "FIFO restrictions"
68429+ default y if GRKERNSEC_CONFIG_AUTO
68430+ help
68431+ If you say Y here, users will not be able to write to FIFOs they don't
68432+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68433+	  the FIFO is also the owner of the directory it's held in. If the sysctl
68434+ option is enabled, a sysctl option with name "fifo_restrictions" is
68435+ created.
68436+
68437+config GRKERNSEC_SYSFS_RESTRICT
68438+ bool "Sysfs/debugfs restriction"
68439+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68440+ depends on SYSFS
68441+ help
68442+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68443+ any filesystem normally mounted under it (e.g. debugfs) will be
68444+ mostly accessible only by root. These filesystems generally provide access
68445+ to hardware and debug information that isn't appropriate for unprivileged
68446+ users of the system. Sysfs and debugfs have also become a large source
68447+ of new vulnerabilities, ranging from infoleaks to local compromise.
68448+ There has been very little oversight with an eye toward security involved
68449+ in adding new exporters of information to these filesystems, so their
68450+ use is discouraged.
68451+ For reasons of compatibility, a few directories have been whitelisted
68452+ for access by non-root users:
68453+ /sys/fs/selinux
68454+ /sys/fs/fuse
68455+ /sys/devices/system/cpu
68456+
68457+config GRKERNSEC_ROFS
68458+ bool "Runtime read-only mount protection"
68459+ depends on SYSCTL
68460+ help
68461+ If you say Y here, a sysctl option with name "romount_protect" will
68462+ be created. By setting this option to 1 at runtime, filesystems
68463+ will be protected in the following ways:
68464+ * No new writable mounts will be allowed
68465+ * Existing read-only mounts won't be able to be remounted read/write
68466+ * Write operations will be denied on all block devices
68467+ This option acts independently of grsec_lock: once it is set to 1,
68468+ it cannot be turned off. Therefore, please be mindful of the resulting
68469+ behavior if this option is enabled in an init script on a read-only
68470+ filesystem.
68471+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68472+ and GRKERNSEC_IO should be enabled and module loading disabled via
68473+ config or at runtime.
68474+ This feature is mainly intended for secure embedded systems.
68475+
68476+
68477+config GRKERNSEC_DEVICE_SIDECHANNEL
68478+ bool "Eliminate stat/notify-based device sidechannels"
68479+ default y if GRKERNSEC_CONFIG_AUTO
68480+ help
68481+ If you say Y here, timing analyses on block or character
68482+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68483+ will be thwarted for unprivileged users. If a process without
68484+ CAP_MKNOD stats such a device, the last access and last modify times
68485+ will match the device's create time. No access or modify events
68486+ will be triggered through inotify/dnotify/fanotify for such devices.
68487+ This feature will prevent attacks that may at a minimum
68488+ allow an attacker to determine the administrator's password length.
68489+
68490+config GRKERNSEC_CHROOT
68491+ bool "Chroot jail restrictions"
68492+ default y if GRKERNSEC_CONFIG_AUTO
68493+ help
68494+ If you say Y here, you will be able to choose several options that will
68495+ make breaking out of a chrooted jail much more difficult. If you
68496+ encounter no software incompatibilities with the following options, it
68497+ is recommended that you enable each one.
68498+
68499+ Note that the chroot restrictions are not intended to apply to "chroots"
68500+ to directories that are simple bind mounts of the global root filesystem.
68501+ For several other reasons, a user shouldn't expect any significant
68502+ security by performing such a chroot.
68503+
68504+config GRKERNSEC_CHROOT_MOUNT
68505+ bool "Deny mounts"
68506+ default y if GRKERNSEC_CONFIG_AUTO
68507+ depends on GRKERNSEC_CHROOT
68508+ help
68509+ If you say Y here, processes inside a chroot will not be able to
68510+ mount or remount filesystems. If the sysctl option is enabled, a
68511+ sysctl option with name "chroot_deny_mount" is created.
68512+
68513+config GRKERNSEC_CHROOT_DOUBLE
68514+ bool "Deny double-chroots"
68515+ default y if GRKERNSEC_CONFIG_AUTO
68516+ depends on GRKERNSEC_CHROOT
68517+ help
68518+ If you say Y here, processes inside a chroot will not be able to chroot
68519+ again outside the chroot. This is a widely used method of breaking
68520+ out of a chroot jail and should not be allowed. If the sysctl
68521+ option is enabled, a sysctl option with name
68522+ "chroot_deny_chroot" is created.
68523+
68524+config GRKERNSEC_CHROOT_PIVOT
68525+ bool "Deny pivot_root in chroot"
68526+ default y if GRKERNSEC_CONFIG_AUTO
68527+ depends on GRKERNSEC_CHROOT
68528+ help
68529+ If you say Y here, processes inside a chroot will not be able to use
68530+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68531+	  works similarly to chroot in that it changes the root filesystem. This
68532+ function could be misused in a chrooted process to attempt to break out
68533+ of the chroot, and therefore should not be allowed. If the sysctl
68534+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68535+ created.
68536+
68537+config GRKERNSEC_CHROOT_CHDIR
68538+ bool "Enforce chdir(\"/\") on all chroots"
68539+ default y if GRKERNSEC_CONFIG_AUTO
68540+ depends on GRKERNSEC_CHROOT
68541+ help
68542+ If you say Y here, the current working directory of all newly-chrooted
68543+	  applications will be set to the root directory of the chroot.
68544+ The man page on chroot(2) states:
68545+ Note that this call does not change the current working
68546+ directory, so that `.' can be outside the tree rooted at
68547+ `/'. In particular, the super-user can escape from a
68548+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68549+
68550+ It is recommended that you say Y here, since it's not known to break
68551+ any software. If the sysctl option is enabled, a sysctl option with
68552+ name "chroot_enforce_chdir" is created.
68553+
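As a sketch, the userspace discipline this option enforces kernel-side; the /var/empty path is illustrative and chroot() requires CAP_SYS_CHROOT:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (chroot("/var/empty") != 0) {
		perror("chroot");
		return 1;
	}
	/* the step this option forces: `.' must not remain outside
	 * the tree rooted at the new `/' */
	if (chdir("/") != 0) {
		perror("chdir");
		return 1;
	}
	return 0;
}
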
68554+config GRKERNSEC_CHROOT_CHMOD
68555+ bool "Deny (f)chmod +s"
68556+ default y if GRKERNSEC_CONFIG_AUTO
68557+ depends on GRKERNSEC_CHROOT
68558+ help
68559+ If you say Y here, processes inside a chroot will not be able to chmod
68560+ or fchmod files to make them have suid or sgid bits. This protects
68561+ against another published method of breaking a chroot. If the sysctl
68562+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68563+ created.
68564+
68565+config GRKERNSEC_CHROOT_FCHDIR
68566+ bool "Deny fchdir and fhandle out of chroot"
68567+ default y if GRKERNSEC_CONFIG_AUTO
68568+ depends on GRKERNSEC_CHROOT
68569+ help
68570+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68571+ to a file descriptor of the chrooting process that points to a directory
68572+ outside the filesystem will be stopped. Additionally, this option prevents
68573+ use of the recently-created syscall for opening files by a guessable "file
68574+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68575+ with name "chroot_deny_fchdir" is created.
68576+
68577+config GRKERNSEC_CHROOT_MKNOD
68578+ bool "Deny mknod"
68579+ default y if GRKERNSEC_CONFIG_AUTO
68580+ depends on GRKERNSEC_CHROOT
68581+ help
68582+ If you say Y here, processes inside a chroot will not be allowed to
68583+ mknod. The problem with using mknod inside a chroot is that it
68584+ would allow an attacker to create a device entry that is the same
68585+	  as one on the physical root of your system, which could be
68586+	  anything from the console device to a device for your hard drive (which
68587+ they could then use to wipe the drive or steal data). It is recommended
68588+ that you say Y here, unless you run into software incompatibilities.
68589+ If the sysctl option is enabled, a sysctl option with name
68590+ "chroot_deny_mknod" is created.
68591+
68592+config GRKERNSEC_CHROOT_SHMAT
68593+ bool "Deny shmat() out of chroot"
68594+ default y if GRKERNSEC_CONFIG_AUTO
68595+ depends on GRKERNSEC_CHROOT
68596+ help
68597+ If you say Y here, processes inside a chroot will not be able to attach
68598+ to shared memory segments that were created outside of the chroot jail.
68599+ It is recommended that you say Y here. If the sysctl option is enabled,
68600+ a sysctl option with name "chroot_deny_shmat" is created.
68601+
68602+config GRKERNSEC_CHROOT_UNIX
68603+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68604+ default y if GRKERNSEC_CONFIG_AUTO
68605+ depends on GRKERNSEC_CHROOT
68606+ help
68607+ If you say Y here, processes inside a chroot will not be able to
68608+ connect to abstract (meaning not belonging to a filesystem) Unix
68609+ domain sockets that were bound outside of a chroot. It is recommended
68610+ that you say Y here. If the sysctl option is enabled, a sysctl option
68611+ with name "chroot_deny_unix" is created.
68612+
68613+config GRKERNSEC_CHROOT_FINDTASK
68614+ bool "Protect outside processes"
68615+ default y if GRKERNSEC_CONFIG_AUTO
68616+ depends on GRKERNSEC_CHROOT
68617+ help
68618+ If you say Y here, processes inside a chroot will not be able to
68619+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68620+ getsid, or view any process outside of the chroot. If the sysctl
68621+ option is enabled, a sysctl option with name "chroot_findtask" is
68622+ created.
68623+
68624+config GRKERNSEC_CHROOT_NICE
68625+ bool "Restrict priority changes"
68626+ default y if GRKERNSEC_CONFIG_AUTO
68627+ depends on GRKERNSEC_CHROOT
68628+ help
68629+ If you say Y here, processes inside a chroot will not be able to raise
68630+ the priority of processes in the chroot, or alter the priority of
68631+ processes outside the chroot. This provides more security than simply
68632+ removing CAP_SYS_NICE from the process' capability set. If the
68633+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68634+ is created.
68635+
68636+config GRKERNSEC_CHROOT_SYSCTL
68637+ bool "Deny sysctl writes"
68638+ default y if GRKERNSEC_CONFIG_AUTO
68639+ depends on GRKERNSEC_CHROOT
68640+ help
68641+ If you say Y here, an attacker in a chroot will not be able to
68642+ write to sysctl entries, either by sysctl(2) or through a /proc
68643+ interface. It is strongly recommended that you say Y here. If the
68644+ sysctl option is enabled, a sysctl option with name
68645+ "chroot_deny_sysctl" is created.
68646+
68647+config GRKERNSEC_CHROOT_RENAME
68648+ bool "Deny bad renames"
68649+ default y if GRKERNSEC_CONFIG_AUTO
68650+ depends on GRKERNSEC_CHROOT
68651+ help
68652+ If you say Y here, an attacker in a chroot will not be able to
68653+ abuse the ability to create double chroots to break out of the
68654+ chroot by exploiting a race condition between a rename of a directory
68655+ within a chroot against an open of a symlink with relative path
68656+ components. This feature will likewise prevent an accomplice outside
68657+ a chroot from enabling a user inside the chroot to break out and make
68658+ use of their credentials on the global filesystem. Enabling this
68659+ feature is essential to prevent root users from breaking out of a
68660+ chroot. If the sysctl option is enabled, a sysctl option with name
68661+ "chroot_deny_bad_rename" is created.
68662+
68663+config GRKERNSEC_CHROOT_CAPS
68664+ bool "Capability restrictions"
68665+ default y if GRKERNSEC_CONFIG_AUTO
68666+ depends on GRKERNSEC_CHROOT
68667+ help
68668+ If you say Y here, the capabilities on all processes within a
68669+	  chroot jail will be lowered to stop module insertion, raw I/O,
68670+ system and net admin tasks, rebooting the system, modifying immutable
68671+ files, modifying IPC owned by another, and changing the system time.
68672+	  This is left as an option because it can break some apps. Disable this
68673+ if your chrooted apps are having problems performing those kinds of
68674+ tasks. If the sysctl option is enabled, a sysctl option with
68675+ name "chroot_caps" is created.
68676+
68677+config GRKERNSEC_CHROOT_INITRD
68678+ bool "Exempt initrd tasks from restrictions"
68679+ default y if GRKERNSEC_CONFIG_AUTO
68680+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68681+ help
68682+ If you say Y here, tasks started prior to init will be exempted from
68683+ grsecurity's chroot restrictions. This option is mainly meant to
68684+	  resolve issues with Plymouth performing privileged operations unnecessarily
68685+ in a chroot.
68686+
68687+endmenu
68688+menu "Kernel Auditing"
68689+depends on GRKERNSEC
68690+
68691+config GRKERNSEC_AUDIT_GROUP
68692+ bool "Single group for auditing"
68693+ help
68694+ If you say Y here, the exec and chdir logging features will only operate
68695+ on a group you specify. This option is recommended if you only want to
68696+ watch certain users instead of having a large amount of logs from the
68697+ entire system. If the sysctl option is enabled, a sysctl option with
68698+ name "audit_group" is created.
68699+
68700+config GRKERNSEC_AUDIT_GID
68701+ int "GID for auditing"
68702+ depends on GRKERNSEC_AUDIT_GROUP
68703+ default 1007
68704+
68705+config GRKERNSEC_EXECLOG
68706+ bool "Exec logging"
68707+ help
68708+ If you say Y here, all execve() calls will be logged (since the
68709+ other exec*() calls are frontends to execve(), all execution
68710+ will be logged). Useful for shell-servers that like to keep track
68711+ of their users. If the sysctl option is enabled, a sysctl option with
68712+ name "exec_logging" is created.
68713+	  WARNING: When enabled, this option will produce a LOT of logs, especially
68714+ on an active system.
68715+
68716+config GRKERNSEC_RESLOG
68717+ bool "Resource logging"
68718+ default y if GRKERNSEC_CONFIG_AUTO
68719+ help
68720+ If you say Y here, all attempts to overstep resource limits will
68721+ be logged with the resource name, the requested size, and the current
68722+ limit. It is highly recommended that you say Y here. If the sysctl
68723+ option is enabled, a sysctl option with name "resource_logging" is
68724+ created. If the RBAC system is enabled, the sysctl value is ignored.
68725+
68726+config GRKERNSEC_CHROOT_EXECLOG
68727+ bool "Log execs within chroot"
68728+ help
68729+ If you say Y here, all executions inside a chroot jail will be logged
68730+	  to syslog. This can produce a large volume of logs if certain
68731+	  applications (e.g. djb's daemontools) are installed on the system, and
68732+ is therefore left as an option. If the sysctl option is enabled, a
68733+ sysctl option with name "chroot_execlog" is created.
68734+
68735+config GRKERNSEC_AUDIT_PTRACE
68736+ bool "Ptrace logging"
68737+ help
68738+ If you say Y here, all attempts to attach to a process via ptrace
68739+ will be logged. If the sysctl option is enabled, a sysctl option
68740+ with name "audit_ptrace" is created.
68741+
68742+config GRKERNSEC_AUDIT_CHDIR
68743+ bool "Chdir logging"
68744+ help
68745+ If you say Y here, all chdir() calls will be logged. If the sysctl
68746+ option is enabled, a sysctl option with name "audit_chdir" is created.
68747+
68748+config GRKERNSEC_AUDIT_MOUNT
68749+ bool "(Un)Mount logging"
68750+ help
68751+ If you say Y here, all mounts and unmounts will be logged. If the
68752+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68753+ created.
68754+
68755+config GRKERNSEC_SIGNAL
68756+ bool "Signal logging"
68757+ default y if GRKERNSEC_CONFIG_AUTO
68758+ help
68759+ If you say Y here, certain important signals will be logged, such as
68760+ SIGSEGV, which will as a result inform you when an error occurs
68761+ in a program, which in some cases could indicate an exploit attempt.
68762+ If the sysctl option is enabled, a sysctl option with name
68763+ "signal_logging" is created.
68764+
68765+config GRKERNSEC_FORKFAIL
68766+ bool "Fork failure logging"
68767+ help
68768+ If you say Y here, all failed fork() attempts will be logged.
68769+ This could suggest a fork bomb, or someone attempting to overstep
68770+ their process limit. If the sysctl option is enabled, a sysctl option
68771+ with name "forkfail_logging" is created.
68772+
68773+config GRKERNSEC_TIME
68774+ bool "Time change logging"
68775+ default y if GRKERNSEC_CONFIG_AUTO
68776+ help
68777+ If you say Y here, any changes of the system clock will be logged.
68778+ If the sysctl option is enabled, a sysctl option with name
68779+ "timechange_logging" is created.
68780+
68781+config GRKERNSEC_PROC_IPADDR
68782+ bool "/proc/<pid>/ipaddr support"
68783+ default y if GRKERNSEC_CONFIG_AUTO
68784+ help
68785+ If you say Y here, a new entry will be added to each /proc/<pid>
68786+ directory that contains the IP address of the user associated with the task.
68787+ The IP is carried across local TCP and AF_UNIX stream sockets.
68788+ This information can be useful for IDS/IPSes to perform remote response
68789+ to a local attack. The entry is readable by only the owner of the
68790+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68791+ the RBAC system), and thus does not create privacy concerns.
68792+
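
A minimal userspace sketch of reading the entry described above; the path assumes this option is enabled, and the read succeeds only for the owner of the task (or root, subject to CAP_DAC_OVERRIDE):

    #include <stdio.h>

    /* Print the grsecurity-provided ipaddr entry for a pid string,
     * e.g. print_task_ipaddr("1234"). */
    static void print_task_ipaddr(const char *pid)
    {
        char path[64], line[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/ipaddr", pid);
        f = fopen(path, "r");
        if (f == NULL)
            return;
        if (fgets(line, sizeof(line), f))
            printf("%s: %s\n", pid, line);
        fclose(f);
    }
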
68793+config GRKERNSEC_RWXMAP_LOG
68794+ bool 'Denied RWX mmap/mprotect logging'
68795+ default y if GRKERNSEC_CONFIG_AUTO
68796+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68797+ help
68798+ If you say Y here, calls to mmap() and mprotect() with explicit
68799+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68800+ denied by the PAX_MPROTECT feature. This feature will also
68801+ log other problematic scenarios that can occur when PAX_MPROTECT
68802+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68803+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68804+ is created.
68805+
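
For illustration, the kind of W|X mapping this option logs; whether it is actually denied depends on PAX_MPROTECT being active for the binary (a standalone sketch, not part of the patch):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* PROT_WRITE and PROT_EXEC together is the combination
         * PAX_MPROTECT denies and rwxmap_logging reports. */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            perror("mmap");  /* typically EPERM when denied */
        else
            munmap(p, 4096);
        return 0;
    }
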
68806+endmenu
68807+
68808+menu "Executable Protections"
68809+depends on GRKERNSEC
68810+
68811+config GRKERNSEC_DMESG
68812+ bool "Dmesg(8) restriction"
68813+ default y if GRKERNSEC_CONFIG_AUTO
68814+ help
68815+ If you say Y here, non-root users will not be able to use dmesg(8)
68816+ to view the contents of the kernel's circular log buffer.
68817+ The kernel's log buffer often contains kernel addresses and other
68818+ identifying information useful to an attacker in fingerprinting a
68819+ system for a targeted exploit.
68820+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68821+ created.
68822+
68823+config GRKERNSEC_HARDEN_PTRACE
68824+ bool "Deter ptrace-based process snooping"
68825+ default y if GRKERNSEC_CONFIG_AUTO
68826+ help
68827+ If you say Y here, TTY sniffers and other malicious monitoring
68828+ programs implemented through ptrace will be defeated. If you
68829+ have been using the RBAC system, this option has already been
68830+ enabled for several years for all users, with the ability to make
68831+ fine-grained exceptions.
68832+
68833+ This option only affects the ability of non-root users to ptrace
68834+ processes that are not a descendant of the ptracing process.
68835+ This means that strace ./binary and gdb ./binary will still work,
68836+ but attaching to arbitrary processes will not. If the sysctl
68837+ option is enabled, a sysctl option with name "harden_ptrace" is
68838+ created.
68839+
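
A small standalone test of the behavior described above: as an unprivileged user, attaching to a pid that is not a descendant of the tracer is expected to fail with EPERM when harden_ptrace is enabled (a sketch, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    int main(int argc, char **argv)
    {
        pid_t pid;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <pid>\n", argv[0]);
            return 1;
        }
        pid = (pid_t)atoi(argv[1]);
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
            perror("PTRACE_ATTACH");  /* EPERM for non-descendants */
        else
            puts("attached");
        return 0;
    }
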
68840+config GRKERNSEC_PTRACE_READEXEC
68841+ bool "Require read access to ptrace sensitive binaries"
68842+ default y if GRKERNSEC_CONFIG_AUTO
68843+ help
68844+ If you say Y here, unprivileged users will not be able to ptrace unreadable
68845+ binaries. This option is useful in environments that
68846+ remove the read bits (e.g. file mode 4711) from suid binaries to
68847+ prevent infoleaking of their contents. This option adds
68848+ consistency to the use of that file mode, as without it an unprivileged
68849+ user could still read the binary out by ptracing it as it runs.
68850+
68851+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68852+ is created.
68853+
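
For reference, mode 4711 as mentioned above is setuid plus rwx for the owner and execute-only for everyone else; a one-function sketch of applying it:

    #include <sys/stat.h>

    /* 4711: S_ISUID | owner rwx | group --x | other --x, i.e.
     * executable by all but readable only by the owner. */
    int make_execonly_suid(const char *path)
    {
        return chmod(path, S_ISUID | S_IRWXU | S_IXGRP | S_IXOTH);
    }
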
68854+config GRKERNSEC_SETXID
68855+ bool "Enforce consistent multithreaded privileges"
68856+ default y if GRKERNSEC_CONFIG_AUTO
68857+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68858+ help
68859+ If you say Y here, a change from a root uid to a non-root uid
68860+ in a multithreaded application will cause the resulting uids,
68861+ gids, supplementary groups, and capabilities in that thread
68862+ to be propagated to the other threads of the process. In most
68863+ cases this is unnecessary, as glibc will emulate this behavior
68864+ on behalf of the application. Other libcs do not act in the
68865+ same way, allowing the other threads of the process to continue
68866+ running with root privileges. If the sysctl option is enabled,
68867+ a sysctl option with name "consistent_setxid" is created.
68868+
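
A minimal demonstration of the inconsistency this option addresses (assumes the program starts as root; uid 1000 is an arbitrary non-root uid). On glibc the setxid broadcast already propagates the change; on a libc without it, the worker thread could otherwise keep uid 0:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *report_uid(void *unused)
    {
        (void)unused;
        sleep(1);  /* let the main thread call setuid() first */
        printf("worker thread uid: %u\n", (unsigned)getuid());
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, report_uid, NULL);
        if (setuid(1000) != 0)
            perror("setuid");
        pthread_join(t, NULL);
        return 0;
    }

Built with cc -pthread and started as root, the worker should report uid 1000 on a system where the change is propagated consistently.
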
68869+config GRKERNSEC_HARDEN_IPC
68870+ bool "Disallow access to overly-permissive IPC objects"
68871+ default y if GRKERNSEC_CONFIG_AUTO
68872+ depends on SYSVIPC
68873+ help
68874+ If you say Y here, access to overly-permissive IPC objects (shared
68875+ memory, message queues, and semaphores) will be denied for processes
68876+ meeting either of the following criteria, beyond normal permission checks:
68877+ 1) If the IPC object is world-accessible and the euid doesn't match
68878+ that of the creator or current uid for the IPC object
68879+ 2) If the IPC object is group-accessible and the egid doesn't
68880+ match that of the creator or current gid for the IPC object
68881+ It's a common error to grant too much permission to these objects,
68882+ with impact ranging from denial of service and information leaking to
68883+ privilege escalation. This feature was developed in response to
68884+ research by Tim Brown:
68885+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68886+ who found hundreds of such insecure usages. Processes with
68887+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68888+ If the sysctl option is enabled, a sysctl option with name
68889+ "harden_ipc" is created.
68890+
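
A sketch of the two criteria above in code form (field names are illustrative and do not correspond to the kernel's ipc_perm layout):

    #include <sys/stat.h>
    #include <sys/types.h>

    /* Deny if (1) world-accessible and the euid matches neither the
     * creator uid nor the owner uid, or (2) group-accessible and the
     * egid matches neither the creator gid nor the owner gid. */
    static int ipc_overly_permissive(mode_t mode,
                                     uid_t cuid, uid_t uid, uid_t euid,
                                     gid_t cgid, gid_t gid, gid_t egid)
    {
        if ((mode & S_IRWXO) && euid != cuid && euid != uid)
            return 1;
        if ((mode & S_IRWXG) && egid != cgid && egid != gid)
            return 1;
        return 0;
    }
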
68891+config GRKERNSEC_TPE
68892+ bool "Trusted Path Execution (TPE)"
68893+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68894+ help
68895+ If you say Y here, you will be able to choose a gid to add to the
68896+ supplementary groups of users you want to mark as "untrusted."
68897+ These users will not be able to execute any files that are not in
68898+ root-owned directories writable only by root. If the sysctl option
68899+ is enabled, a sysctl option with name "tpe" is created.
68900+
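
A sketch of the base trust test described above, applied to the attributes of the directory containing a binary (illustrative, not the grsecurity implementation):

    #include <sys/stat.h>
    #include <sys/types.h>

    /* Base TPE rule: the containing directory must be root-owned
     * and carry no group or world write bits. */
    static int tpe_dir_trusted(uid_t dir_uid, mode_t dir_mode)
    {
        return dir_uid == 0 && !(dir_mode & (S_IWGRP | S_IWOTH));
    }
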
68901+config GRKERNSEC_TPE_ALL
68902+ bool "Partially restrict all non-root users"
68903+ depends on GRKERNSEC_TPE
68904+ help
68905+ If you say Y here, all non-root users will be covered under
68906+ a weaker TPE restriction. This is separate from, and in addition to,
68907+ the main TPE options that you have selected elsewhere. Thus, if a
68908+ "trusted" GID is chosen, this restriction applies to even that GID.
68909+ Under this restriction, all non-root users will only be allowed to
68910+ execute files in directories they own that are not group or
68911+ world-writable, or in directories owned by root and writable only by
68912+ root. If the sysctl option is enabled, a sysctl option with name
68913+ "tpe_restrict_all" is created.
68914+
68915+config GRKERNSEC_TPE_INVERT
68916+ bool "Invert GID option"
68917+ depends on GRKERNSEC_TPE
68918+ help
68919+ If you say Y here, the group you specify in the TPE configuration will
68920+ select the group for which TPE restrictions are *disabled*. This
68921+ option is useful if you want TPE restrictions to be applied to most
68922+ users on the system. If the sysctl option is enabled, a sysctl option
68923+ with name "tpe_invert" is created. Unlike other sysctl options, this
68924+ entry will default to on for backward-compatibility.
68925+
68926+config GRKERNSEC_TPE_GID
68927+ int
68928+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
68929+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
68930+
68931+config GRKERNSEC_TPE_UNTRUSTED_GID
68932+ int "GID for TPE-untrusted users"
68933+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
68934+ default 1005
68935+ help
68936+ Setting this GID determines what group TPE restrictions will be
68937+ *enabled* for. If the sysctl option is enabled, a sysctl option
68938+ with name "tpe_gid" is created.
68939+
68940+config GRKERNSEC_TPE_TRUSTED_GID
68941+ int "GID for TPE-trusted users"
68942+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
68943+ default 1005
68944+ help
68945+ Setting this GID determines what group TPE restrictions will be
68946+ *disabled* for. If the sysctl option is enabled, a sysctl option
68947+ with name "tpe_gid" is created.
68948+
68949+endmenu
68950+menu "Network Protections"
68951+depends on GRKERNSEC
68952+
68953+config GRKERNSEC_BLACKHOLE
68954+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
68955+ default y if GRKERNSEC_CONFIG_AUTO
68956+ depends on NET
68957+ help
68958+ If you say Y here, neither TCP resets nor ICMP
68959+ destination-unreachable packets will be sent in response to packets
68960+ sent to ports for which no associated listening process exists.
68961+ It will also prevent the sending of ICMP protocol unreachable packets
68962+ in response to packets with unknown protocols.
68963+ This feature supports both IPv4 and IPv6 and exempts the
68964+ loopback interface from blackholing. Enabling this feature
68965+ makes a host more resilient to DoS attacks and reduces network
68966+ visibility against scanners.
68967+
68968+ The blackhole feature as-implemented is equivalent to the FreeBSD
68969+ blackhole feature, as it prevents RST responses to all packets, not
68970+ just SYNs. Under most application behavior this causes no
68971+ problems, but applications (like haproxy) may not close certain
68972+ connections in a way that cleanly terminates them on the remote
68973+ end, leaving the remote host in LAST_ACK state. Because of this
68974+ side-effect and to prevent intentional LAST_ACK DoSes, this
68975+ feature also adds automatic mitigation against such attacks.
68976+ The mitigation drastically reduces the amount of time a socket
68977+ can spend in LAST_ACK state. If you're using haproxy and not
68978+ all servers it connects to have this option enabled, consider
68979+ disabling this feature on the haproxy host.
68980+
68981+ If the sysctl option is enabled, two sysctl options with names
68982+ "ip_blackhole" and "lastack_retries" will be created.
68983+ While "ip_blackhole" takes the standard zero/non-zero on/off
68984+ toggle, "lastack_retries" uses the same kinds of values as
68985+ "tcp_retries1" and "tcp_retries2". The default value of 4
68986+ prevents a socket from lasting more than 45 seconds in LAST_ACK
68987+ state.
68988+
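
The 45-second bound quoted above can be reproduced if one assumes a 3-second initial retransmission timeout that doubles after each retry (the RTO schedule is an assumption here, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rto = 3, total = 0, i;

        /* lastack_retries = 4: 3 + 6 + 12 + 24 = 45 seconds */
        for (i = 0; i < 4; i++) {
            total += rto;
            rto *= 2;
        }
        printf("max LAST_ACK lifetime: %u seconds\n", total);
        return 0;
    }
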
68989+config GRKERNSEC_NO_SIMULT_CONNECT
68990+ bool "Disable TCP Simultaneous Connect"
68991+ default y if GRKERNSEC_CONFIG_AUTO
68992+ depends on NET
68993+ help
68994+ If you say Y here, a feature by Willy Tarreau will be enabled that
68995+ removes a weakness in Linux's strict implementation of TCP that
68996+ allows two clients to connect to each other without either entering
68997+ a listening state. The weakness allows an attacker to easily prevent
68998+ a client from connecting to a known server provided the source port
68999+ for the connection is guessed correctly.
69000+
69001+ As the weakness could be used to prevent an antivirus or IPS from
69002+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69003+ it should be eliminated by enabling this option. Though Linux is
69004+ one of the few operating systems supporting simultaneous connect, it
69005+ has no legitimate use in practice and is rarely supported by firewalls.
69006+
69007+config GRKERNSEC_SOCKET
69008+ bool "Socket restrictions"
69009+ depends on NET
69010+ help
69011+ If you say Y here, you will be able to choose from several options.
69012+ If you assign a GID on your system and add it to the supplementary
69013+ groups of users you want to restrict socket access to, this patch
69014+ will enforce up to three restrictions, based on the option(s) you choose.
69015+
69016+config GRKERNSEC_SOCKET_ALL
69017+ bool "Deny any sockets to group"
69018+ depends on GRKERNSEC_SOCKET
69019+ help
69020+ If you say Y here, you will be able to choose a GID whose users will
69021+ be unable to connect to other hosts from your machine or run server
69022+ applications from your machine. If the sysctl option is enabled, a
69023+ sysctl option with name "socket_all" is created.
69024+
69025+config GRKERNSEC_SOCKET_ALL_GID
69026+ int "GID to deny all sockets for"
69027+ depends on GRKERNSEC_SOCKET_ALL
69028+ default 1004
69029+ help
69030+ Here you can choose the GID to disable socket access for. Remember to
69031+ add the users you want socket access disabled for to the GID
69032+ specified here. If the sysctl option is enabled, a sysctl option
69033+ with name "socket_all_gid" is created.
69034+
69035+config GRKERNSEC_SOCKET_CLIENT
69036+ bool "Deny client sockets to group"
69037+ depends on GRKERNSEC_SOCKET
69038+ help
69039+ If you say Y here, you will be able to choose a GID whose users will
69040+ be unable to connect to other hosts from your machine, but will be
69041+ able to run servers. If this option is enabled, all users in the group
69042+ you specify will have to use passive mode when initiating ftp transfers
69043+ from the shell on your machine. If the sysctl option is enabled, a
69044+ sysctl option with name "socket_client" is created.
69045+
69046+config GRKERNSEC_SOCKET_CLIENT_GID
69047+ int "GID to deny client sockets for"
69048+ depends on GRKERNSEC_SOCKET_CLIENT
69049+ default 1003
69050+ help
69051+ Here you can choose the GID to disable client socket access for.
69052+ Remember to add the users you want client socket access disabled for to
69053+ the GID specified here. If the sysctl option is enabled, a sysctl
69054+ option with name "socket_client_gid" is created.
69055+
69056+config GRKERNSEC_SOCKET_SERVER
69057+ bool "Deny server sockets to group"
69058+ depends on GRKERNSEC_SOCKET
69059+ help
69060+ If you say Y here, you will be able to choose a GID whose users will
69061+ be unable to run server applications from your machine. If the sysctl
69062+ option is enabled, a sysctl option with name "socket_server" is created.
69063+
69064+config GRKERNSEC_SOCKET_SERVER_GID
69065+ int "GID to deny server sockets for"
69066+ depends on GRKERNSEC_SOCKET_SERVER
69067+ default 1002
69068+ help
69069+ Here you can choose the GID to disable server socket access for.
69070+ Remember to add the users you want server socket access disabled for to
69071+ the GID specified here. If the sysctl option is enabled, a sysctl
69072+ option with name "socket_server_gid" is created.
69073+
69074+endmenu
69075+
69076+menu "Physical Protections"
69077+depends on GRKERNSEC
69078+
69079+config GRKERNSEC_DENYUSB
69080+ bool "Deny new USB connections after toggle"
69081+ default y if GRKERNSEC_CONFIG_AUTO
69082+ depends on SYSCTL && USB_SUPPORT
69083+ help
69084+ If you say Y here, a new sysctl option with name "deny_new_usb"
69085+ will be created. Setting its value to 1 will prevent any new
69086+ USB devices from being recognized by the OS. Any attempted USB
69087+ device insertion will be logged. This option is intended to be
69088+ used against custom USB devices designed to exploit vulnerabilities
69089+ in various USB device drivers.
69090+
69091+ For greatest effectiveness, this sysctl should be set after any
69092+ relevant init scripts. This option is safe to enable in distros
69093+ as each user can choose whether or not to toggle the sysctl.
69094+
69095+config GRKERNSEC_DENYUSB_FORCE
69096+ bool "Reject all USB devices not connected at boot"
69097+ select USB
69098+ depends on GRKERNSEC_DENYUSB
69099+ help
69100+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69101+ that doesn't involve a sysctl entry. This option should only be
69102+ enabled if you're sure you want to deny all new USB connections
69103+ at runtime and don't want to modify init scripts. This should not
69104+ be enabled by distros. It forces the core USB code to be built
69105+ into the kernel image so that all devices connected at boot time
69106+ can be recognized and new USB device connections can be prevented
69107+ prior to init running.
69108+
69109+endmenu
69110+
69111+menu "Sysctl Support"
69112+depends on GRKERNSEC && SYSCTL
69113+
69114+config GRKERNSEC_SYSCTL
69115+ bool "Sysctl support"
69116+ default y if GRKERNSEC_CONFIG_AUTO
69117+ help
69118+ If you say Y here, you will be able to change the options that
69119+ grsecurity runs with at bootup, without having to recompile your
69120+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69121+ to enable (1) or disable (0) various features. All the sysctl entries
69122+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69123+ All features enabled in the kernel configuration are disabled at boot
69124+ if you do not say Y to the "Turn on features by default" option.
69125+ All options should be set at startup, and the grsec_lock entry should
69126+ be set to a non-zero value after all the options are set.
69127+ *THIS IS EXTREMELY IMPORTANT*
69128+
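
A sketch of the startup sequence the help text above prescribes: write the desired entries under /proc/sys/kernel/grsecurity, then set grsec_lock last (the exec_logging entry is just an example and only exists if the corresponding option is enabled):

    #include <stdio.h>

    static int grsec_sysctl_write(const char *name, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (f == NULL)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        grsec_sysctl_write("exec_logging", "1");  /* example toggle */
        /* ... set any other entries here ... */
        grsec_sysctl_write("grsec_lock", "1");    /* always last */
        return 0;
    }

Once grsec_lock is non-zero, the remaining entries become immutable, so it must be the final write.
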
69129+config GRKERNSEC_SYSCTL_DISTRO
69130+ bool "Extra sysctl support for distro makers (READ HELP)"
69131+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69132+ help
69133+ If you say Y here, additional sysctl options will be created
69134+ for features that affect processes running as root. Therefore,
69135+ it is critical when using this option that the grsec_lock entry be
69136+ enabled after boot. Only distros that ship prebuilt kernel packages
69137+ with this option enabled and that can ensure grsec_lock is set
69138+ after boot should use this option.
69139+ *Failure to set grsec_lock after boot makes all grsec features
69140+ this option covers useless*
69141+
69142+ Currently this option creates the following sysctl entries:
69143+ "Disable Privileged I/O": "disable_priv_io"
69144+
69145+config GRKERNSEC_SYSCTL_ON
69146+ bool "Turn on features by default"
69147+ default y if GRKERNSEC_CONFIG_AUTO
69148+ depends on GRKERNSEC_SYSCTL
69149+ help
69150+ If you say Y here, instead of having all features enabled in the
69151+ kernel configuration disabled at boot time, the features will be
69152+ enabled at boot time. It is recommended you say Y here unless
69153+ there is some reason you would want all sysctl-tunable features to
69154+ be disabled by default. As mentioned elsewhere, it is important
69155+ to enable the grsec_lock entry once you have finished modifying
69156+ the sysctl entries.
69157+
69158+endmenu
69159+menu "Logging Options"
69160+depends on GRKERNSEC
69161+
69162+config GRKERNSEC_FLOODTIME
69163+ int "Seconds in between log messages (minimum)"
69164+ default 10
69165+ help
69166+ This option allows you to enforce the number of seconds between
69167+ grsecurity log messages. The default should be suitable for most
69168+ people; however, if you choose to change it, choose a value small enough
69169+ to allow informative logs to be produced, but large enough to
69170+ prevent flooding.
69171+
69172+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69173+ any rate limiting on grsecurity log messages.
69174+
69175+config GRKERNSEC_FLOODBURST
69176+ int "Number of messages in a burst (maximum)"
69177+ default 6
69178+ help
69179+ This option allows you to choose the maximum number of messages allowed
69180+ within the flood time interval you chose in a separate option. The
69181+ default should be suitable for most people; however, if you find that
69182+ many of your logs are being interpreted as flooding, you may want to
69183+ raise this value.
69184+
69185+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69186+ any rate limiting on grsecurity log messages.
69187+
69188+endmenu
69189diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69190new file mode 100644
69191index 0000000..30ababb
69192--- /dev/null
69193+++ b/grsecurity/Makefile
69194@@ -0,0 +1,54 @@
69195+# grsecurity – access control and security hardening for Linux
69196+# All code in this directory and various hooks located throughout the Linux kernel are
69197+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69198+# http://www.grsecurity.net spender@grsecurity.net
69199+#
69200+# This program is free software; you can redistribute it and/or
69201+# modify it under the terms of the GNU General Public License version 2
69202+# as published by the Free Software Foundation.
69203+#
69204+# This program is distributed in the hope that it will be useful,
69205+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69206+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69207+# GNU General Public License for more details.
69208+#
69209+# You should have received a copy of the GNU General Public License
69210+# along with this program; if not, write to the Free Software
69211+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69212+
69213+KBUILD_CFLAGS += -Werror
69214+
69215+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69216+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69217+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69218+ grsec_usb.o grsec_ipc.o grsec_proc.o
69219+
69220+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69221+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69222+ gracl_learn.o grsec_log.o gracl_policy.o
69223+ifdef CONFIG_COMPAT
69224+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69225+endif
69226+
69227+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69228+
69229+ifdef CONFIG_NET
69230+obj-y += grsec_sock.o
69231+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69232+endif
69233+
69234+ifndef CONFIG_GRKERNSEC
69235+obj-y += grsec_disabled.o
69236+endif
69237+
69238+ifdef CONFIG_GRKERNSEC_HIDESYM
69239+extra-y := grsec_hidesym.o
69240+$(obj)/grsec_hidesym.o:
69241+ @-chmod -f 500 /boot
69242+ @-chmod -f 500 /lib/modules
69243+ @-chmod -f 500 /lib64/modules
69244+ @-chmod -f 500 /lib32/modules
69245+ @-chmod -f 700 .
69246+ @-chmod -f 700 $(objtree)
69247+ @echo ' grsec: protected kernel image paths'
69248+endif
69249diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69250new file mode 100644
69251index 0000000..6c1e154
69252--- /dev/null
69253+++ b/grsecurity/gracl.c
69254@@ -0,0 +1,2749 @@
69255+#include <linux/kernel.h>
69256+#include <linux/module.h>
69257+#include <linux/sched.h>
69258+#include <linux/mm.h>
69259+#include <linux/file.h>
69260+#include <linux/fs.h>
69261+#include <linux/namei.h>
69262+#include <linux/mount.h>
69263+#include <linux/tty.h>
69264+#include <linux/proc_fs.h>
69265+#include <linux/lglock.h>
69266+#include <linux/slab.h>
69267+#include <linux/vmalloc.h>
69268+#include <linux/types.h>
69269+#include <linux/sysctl.h>
69270+#include <linux/netdevice.h>
69271+#include <linux/ptrace.h>
69272+#include <linux/gracl.h>
69273+#include <linux/gralloc.h>
69274+#include <linux/security.h>
69275+#include <linux/grinternal.h>
69276+#include <linux/pid_namespace.h>
69277+#include <linux/stop_machine.h>
69278+#include <linux/fdtable.h>
69279+#include <linux/percpu.h>
69280+#include <linux/lglock.h>
69281+#include <linux/hugetlb.h>
69282+#include <linux/posix-timers.h>
69283+#include <linux/prefetch.h>
69284+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69285+#include <linux/magic.h>
69286+#include <linux/pagemap.h>
69287+#include "../fs/btrfs/async-thread.h"
69288+#include "../fs/btrfs/ctree.h"
69289+#include "../fs/btrfs/btrfs_inode.h"
69290+#endif
69291+#include "../fs/mount.h"
69292+
69293+#include <asm/uaccess.h>
69294+#include <asm/errno.h>
69295+#include <asm/mman.h>
69296+
69297+#define FOR_EACH_ROLE_START(role) \
69298+ role = running_polstate.role_list; \
69299+ while (role) {
69300+
69301+#define FOR_EACH_ROLE_END(role) \
69302+ role = role->prev; \
69303+ }
69304+
69305+extern struct path gr_real_root;
69306+
69307+static struct gr_policy_state running_polstate;
69308+struct gr_policy_state *polstate = &running_polstate;
69309+extern struct gr_alloc_state *current_alloc_state;
69310+
69311+extern char *gr_shared_page[4];
69312+DEFINE_RWLOCK(gr_inode_lock);
69313+
69314+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69315+
69316+#ifdef CONFIG_NET
69317+extern struct vfsmount *sock_mnt;
69318+#endif
69319+
69320+extern struct vfsmount *pipe_mnt;
69321+extern struct vfsmount *shm_mnt;
69322+
69323+#ifdef CONFIG_HUGETLBFS
69324+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69325+#endif
69326+
69327+extern u16 acl_sp_role_value;
69328+extern struct acl_object_label *fakefs_obj_rw;
69329+extern struct acl_object_label *fakefs_obj_rwx;
69330+
69331+int gr_acl_is_enabled(void)
69332+{
69333+ return (gr_status & GR_READY);
69334+}
69335+
69336+void gr_enable_rbac_system(void)
69337+{
69338+ pax_open_kernel();
69339+ gr_status |= GR_READY;
69340+ pax_close_kernel();
69341+}
69342+
69343+int gr_rbac_disable(void *unused)
69344+{
69345+ pax_open_kernel();
69346+ gr_status &= ~GR_READY;
69347+ pax_close_kernel();
69348+
69349+ return 0;
69350+}
69351+
69352+static inline dev_t __get_dev(const struct dentry *dentry)
69353+{
69354+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69355+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69356+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69357+ else
69358+#endif
69359+ return dentry->d_sb->s_dev;
69360+}
69361+
69362+static inline u64 __get_ino(const struct dentry *dentry)
69363+{
69364+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69365+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69366+ return btrfs_ino(dentry->d_inode);
69367+ else
69368+#endif
69369+ return dentry->d_inode->i_ino;
69370+}
69371+
69372+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69373+{
69374+ return __get_dev(dentry);
69375+}
69376+
69377+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69378+{
69379+ return __get_ino(dentry);
69380+}
69381+
69382+static char gr_task_roletype_to_char(struct task_struct *task)
69383+{
69384+ switch (task->role->roletype &
69385+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69386+ GR_ROLE_SPECIAL)) {
69387+ case GR_ROLE_DEFAULT:
69388+ return 'D';
69389+ case GR_ROLE_USER:
69390+ return 'U';
69391+ case GR_ROLE_GROUP:
69392+ return 'G';
69393+ case GR_ROLE_SPECIAL:
69394+ return 'S';
69395+ }
69396+
69397+ return 'X';
69398+}
69399+
69400+char gr_roletype_to_char(void)
69401+{
69402+ return gr_task_roletype_to_char(current);
69403+}
69404+
69405+__inline__ int
69406+gr_acl_tpe_check(void)
69407+{
69408+ if (unlikely(!(gr_status & GR_READY)))
69409+ return 0;
69410+ if (current->role->roletype & GR_ROLE_TPE)
69411+ return 1;
69412+ else
69413+ return 0;
69414+}
69415+
69416+int
69417+gr_handle_rawio(const struct inode *inode)
69418+{
69419+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69420+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69421+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69422+ !capable(CAP_SYS_RAWIO))
69423+ return 1;
69424+#endif
69425+ return 0;
69426+}
69427+
69428+int
69429+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69430+{
69431+ if (likely(lena != lenb))
69432+ return 0;
69433+
69434+ return !memcmp(a, b, lena);
69435+}
69436+
69437+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69438+{
69439+ *buflen -= namelen;
69440+ if (*buflen < 0)
69441+ return -ENAMETOOLONG;
69442+ *buffer -= namelen;
69443+ memcpy(*buffer, str, namelen);
69444+ return 0;
69445+}
69446+
69447+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69448+{
69449+ return prepend(buffer, buflen, name->name, name->len);
69450+}
69451+
69452+static int prepend_path(const struct path *path, struct path *root,
69453+ char **buffer, int *buflen)
69454+{
69455+ struct dentry *dentry = path->dentry;
69456+ struct vfsmount *vfsmnt = path->mnt;
69457+ struct mount *mnt = real_mount(vfsmnt);
69458+ bool slash = false;
69459+ int error = 0;
69460+
69461+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69462+ struct dentry * parent;
69463+
69464+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69465+ /* Global root? */
69466+ if (!mnt_has_parent(mnt)) {
69467+ goto out;
69468+ }
69469+ dentry = mnt->mnt_mountpoint;
69470+ mnt = mnt->mnt_parent;
69471+ vfsmnt = &mnt->mnt;
69472+ continue;
69473+ }
69474+ parent = dentry->d_parent;
69475+ prefetch(parent);
69476+ spin_lock(&dentry->d_lock);
69477+ error = prepend_name(buffer, buflen, &dentry->d_name);
69478+ spin_unlock(&dentry->d_lock);
69479+ if (!error)
69480+ error = prepend(buffer, buflen, "/", 1);
69481+ if (error)
69482+ break;
69483+
69484+ slash = true;
69485+ dentry = parent;
69486+ }
69487+
69488+out:
69489+ if (!error && !slash)
69490+ error = prepend(buffer, buflen, "/", 1);
69491+
69492+ return error;
69493+}
69494+
69495+/* this must be called with mount_lock and rename_lock held */
69496+
69497+static char *__our_d_path(const struct path *path, struct path *root,
69498+ char *buf, int buflen)
69499+{
69500+ char *res = buf + buflen;
69501+ int error;
69502+
69503+ prepend(&res, &buflen, "\0", 1);
69504+ error = prepend_path(path, root, &res, &buflen);
69505+ if (error)
69506+ return ERR_PTR(error);
69507+
69508+ return res;
69509+}
69510+
69511+static char *
69512+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69513+{
69514+ char *retval;
69515+
69516+ retval = __our_d_path(path, root, buf, buflen);
69517+ if (unlikely(IS_ERR(retval)))
69518+ retval = strcpy(buf, "<path too long>");
69519+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69520+ retval[1] = '\0';
69521+
69522+ return retval;
69523+}
69524+
69525+static char *
69526+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69527+ char *buf, int buflen)
69528+{
69529+ struct path path;
69530+ char *res;
69531+
69532+ path.dentry = (struct dentry *)dentry;
69533+ path.mnt = (struct vfsmount *)vfsmnt;
69534+
69535+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69536+ by the RBAC system */
69537+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69538+
69539+ return res;
69540+}
69541+
69542+static char *
69543+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69544+ char *buf, int buflen)
69545+{
69546+ char *res;
69547+ struct path path;
69548+ struct path root;
69549+ struct task_struct *reaper = init_pid_ns.child_reaper;
69550+
69551+ path.dentry = (struct dentry *)dentry;
69552+ path.mnt = (struct vfsmount *)vfsmnt;
69553+
69554+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69555+ get_fs_root(reaper->fs, &root);
69556+
69557+ read_seqlock_excl(&mount_lock);
69558+ write_seqlock(&rename_lock);
69559+ res = gen_full_path(&path, &root, buf, buflen);
69560+ write_sequnlock(&rename_lock);
69561+ read_sequnlock_excl(&mount_lock);
69562+
69563+ path_put(&root);
69564+ return res;
69565+}
69566+
69567+char *
69568+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69569+{
69570+ char *ret;
69571+ read_seqlock_excl(&mount_lock);
69572+ write_seqlock(&rename_lock);
69573+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69574+ PAGE_SIZE);
69575+ write_sequnlock(&rename_lock);
69576+ read_sequnlock_excl(&mount_lock);
69577+ return ret;
69578+}
69579+
69580+static char *
69581+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69582+{
69583+ char *ret;
69584+ char *buf;
69585+ int buflen;
69586+
69587+ read_seqlock_excl(&mount_lock);
69588+ write_seqlock(&rename_lock);
69589+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69590+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69591+ buflen = (int)(ret - buf);
69592+ if (buflen >= 5)
69593+ prepend(&ret, &buflen, "/proc", 5);
69594+ else
69595+ ret = strcpy(buf, "<path too long>");
69596+ write_sequnlock(&rename_lock);
69597+ read_sequnlock_excl(&mount_lock);
69598+ return ret;
69599+}
69600+
69601+char *
69602+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69603+{
69604+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69605+ PAGE_SIZE);
69606+}
69607+
69608+char *
69609+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69610+{
69611+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69612+ PAGE_SIZE);
69613+}
69614+
69615+char *
69616+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69617+{
69618+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69619+ PAGE_SIZE);
69620+}
69621+
69622+char *
69623+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69624+{
69625+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69626+ PAGE_SIZE);
69627+}
69628+
69629+char *
69630+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69631+{
69632+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69633+ PAGE_SIZE);
69634+}
69635+
69636+__inline__ __u32
69637+to_gr_audit(const __u32 reqmode)
69638+{
69639+ /* masks off auditable permission flags, then shifts them to create
69640+ auditing flags, and adds the special case of append auditing if
69641+ we're requesting write */
69642+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69643+}
69644+
69645+struct acl_role_label *
69646+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69647+ const gid_t gid)
69648+{
69649+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69650+ struct acl_role_label *match;
69651+ struct role_allowed_ip *ipp;
69652+ unsigned int x;
69653+ u32 curr_ip = task->signal->saved_ip;
69654+
69655+ match = state->acl_role_set.r_hash[index];
69656+
69657+ while (match) {
69658+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69659+ for (x = 0; x < match->domain_child_num; x++) {
69660+ if (match->domain_children[x] == uid)
69661+ goto found;
69662+ }
69663+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69664+ break;
69665+ match = match->next;
69666+ }
69667+found:
69668+ if (match == NULL) {
69669+ try_group:
69670+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69671+ match = state->acl_role_set.r_hash[index];
69672+
69673+ while (match) {
69674+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69675+ for (x = 0; x < match->domain_child_num; x++) {
69676+ if (match->domain_children[x] == gid)
69677+ goto found2;
69678+ }
69679+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69680+ break;
69681+ match = match->next;
69682+ }
69683+found2:
69684+ if (match == NULL)
69685+ match = state->default_role;
69686+ if (match->allowed_ips == NULL)
69687+ return match;
69688+ else {
69689+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69690+ if (likely
69691+ ((ntohl(curr_ip) & ipp->netmask) ==
69692+ (ntohl(ipp->addr) & ipp->netmask)))
69693+ return match;
69694+ }
69695+ match = state->default_role;
69696+ }
69697+ } else if (match->allowed_ips == NULL) {
69698+ return match;
69699+ } else {
69700+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69701+ if (likely
69702+ ((ntohl(curr_ip) & ipp->netmask) ==
69703+ (ntohl(ipp->addr) & ipp->netmask)))
69704+ return match;
69705+ }
69706+ goto try_group;
69707+ }
69708+
69709+ return match;
69710+}
69711+
69712+static struct acl_role_label *
69713+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69714+ const gid_t gid)
69715+{
69716+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69717+}
69718+
69719+struct acl_subject_label *
69720+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69721+ const struct acl_role_label *role)
69722+{
69723+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69724+ struct acl_subject_label *match;
69725+
69726+ match = role->subj_hash[index];
69727+
69728+ while (match && (match->inode != ino || match->device != dev ||
69729+ (match->mode & GR_DELETED))) {
69730+ match = match->next;
69731+ }
69732+
69733+ if (match && !(match->mode & GR_DELETED))
69734+ return match;
69735+ else
69736+ return NULL;
69737+}
69738+
69739+struct acl_subject_label *
69740+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69741+ const struct acl_role_label *role)
69742+{
69743+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69744+ struct acl_subject_label *match;
69745+
69746+ match = role->subj_hash[index];
69747+
69748+ while (match && (match->inode != ino || match->device != dev ||
69749+ !(match->mode & GR_DELETED))) {
69750+ match = match->next;
69751+ }
69752+
69753+ if (match && (match->mode & GR_DELETED))
69754+ return match;
69755+ else
69756+ return NULL;
69757+}
69758+
69759+static struct acl_object_label *
69760+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69761+ const struct acl_subject_label *subj)
69762+{
69763+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69764+ struct acl_object_label *match;
69765+
69766+ match = subj->obj_hash[index];
69767+
69768+ while (match && (match->inode != ino || match->device != dev ||
69769+ (match->mode & GR_DELETED))) {
69770+ match = match->next;
69771+ }
69772+
69773+ if (match && !(match->mode & GR_DELETED))
69774+ return match;
69775+ else
69776+ return NULL;
69777+}
69778+
69779+static struct acl_object_label *
69780+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69781+ const struct acl_subject_label *subj)
69782+{
69783+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69784+ struct acl_object_label *match;
69785+
69786+ match = subj->obj_hash[index];
69787+
69788+ while (match && (match->inode != ino || match->device != dev ||
69789+ !(match->mode & GR_DELETED))) {
69790+ match = match->next;
69791+ }
69792+
69793+ if (match && (match->mode & GR_DELETED))
69794+ return match;
69795+
69796+ match = subj->obj_hash[index];
69797+
69798+ while (match && (match->inode != ino || match->device != dev ||
69799+ (match->mode & GR_DELETED))) {
69800+ match = match->next;
69801+ }
69802+
69803+ if (match && !(match->mode & GR_DELETED))
69804+ return match;
69805+ else
69806+ return NULL;
69807+}
69808+
69809+struct name_entry *
69810+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69811+{
69812+ unsigned int len = strlen(name);
69813+ unsigned int key = full_name_hash(name, len);
69814+ unsigned int index = key % state->name_set.n_size;
69815+ struct name_entry *match;
69816+
69817+ match = state->name_set.n_hash[index];
69818+
69819+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69820+ match = match->next;
69821+
69822+ return match;
69823+}
69824+
69825+static struct name_entry *
69826+lookup_name_entry(const char *name)
69827+{
69828+ return __lookup_name_entry(&running_polstate, name);
69829+}
69830+
69831+static struct name_entry *
69832+lookup_name_entry_create(const char *name)
69833+{
69834+ unsigned int len = strlen(name);
69835+ unsigned int key = full_name_hash(name, len);
69836+ unsigned int index = key % running_polstate.name_set.n_size;
69837+ struct name_entry *match;
69838+
69839+ match = running_polstate.name_set.n_hash[index];
69840+
69841+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69842+ !match->deleted))
69843+ match = match->next;
69844+
69845+ if (match && match->deleted)
69846+ return match;
69847+
69848+ match = running_polstate.name_set.n_hash[index];
69849+
69850+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69851+ match->deleted))
69852+ match = match->next;
69853+
69854+ if (match && !match->deleted)
69855+ return match;
69856+ else
69857+ return NULL;
69858+}
69859+
69860+static struct inodev_entry *
69861+lookup_inodev_entry(const u64 ino, const dev_t dev)
69862+{
69863+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69864+ struct inodev_entry *match;
69865+
69866+ match = running_polstate.inodev_set.i_hash[index];
69867+
69868+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69869+ match = match->next;
69870+
69871+ return match;
69872+}
69873+
69874+void
69875+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69876+{
69877+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69878+ state->inodev_set.i_size);
69879+ struct inodev_entry **curr;
69880+
69881+ entry->prev = NULL;
69882+
69883+ curr = &state->inodev_set.i_hash[index];
69884+ if (*curr != NULL)
69885+ (*curr)->prev = entry;
69886+
69887+ entry->next = *curr;
69888+ *curr = entry;
69889+
69890+ return;
69891+}
69892+
69893+static void
69894+insert_inodev_entry(struct inodev_entry *entry)
69895+{
69896+ __insert_inodev_entry(&running_polstate, entry);
69897+}
69898+
69899+void
69900+insert_acl_obj_label(struct acl_object_label *obj,
69901+ struct acl_subject_label *subj)
69902+{
69903+ unsigned int index =
69904+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69905+ struct acl_object_label **curr;
69906+
69907+ obj->prev = NULL;
69908+
69909+ curr = &subj->obj_hash[index];
69910+ if (*curr != NULL)
69911+ (*curr)->prev = obj;
69912+
69913+ obj->next = *curr;
69914+ *curr = obj;
69915+
69916+ return;
69917+}
69918+
69919+void
69920+insert_acl_subj_label(struct acl_subject_label *obj,
69921+ struct acl_role_label *role)
69922+{
69923+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
69924+ struct acl_subject_label **curr;
69925+
69926+ obj->prev = NULL;
69927+
69928+ curr = &role->subj_hash[index];
69929+ if (*curr != NULL)
69930+ (*curr)->prev = obj;
69931+
69932+ obj->next = *curr;
69933+ *curr = obj;
69934+
69935+ return;
69936+}
69937+
69938+/* derived from glibc fnmatch() 0: match, 1: no match*/
69939+
69940+static int
69941+glob_match(const char *p, const char *n)
69942+{
69943+ char c;
69944+
69945+ while ((c = *p++) != '\0') {
69946+ switch (c) {
69947+ case '?':
69948+ if (*n == '\0')
69949+ return 1;
69950+ else if (*n == '/')
69951+ return 1;
69952+ break;
69953+ case '\\':
69954+ if (*n != c)
69955+ return 1;
69956+ break;
69957+ case '*':
69958+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
69959+ if (*n == '/')
69960+ return 1;
69961+ else if (c == '?') {
69962+ if (*n == '\0')
69963+ return 1;
69964+ else
69965+ ++n;
69966+ }
69967+ }
69968+ if (c == '\0') {
69969+ return 0;
69970+ } else {
69971+ const char *endp;
69972+
69973+ if ((endp = strchr(n, '/')) == NULL)
69974+ endp = n + strlen(n);
69975+
69976+ if (c == '[') {
69977+ for (--p; n < endp; ++n)
69978+ if (!glob_match(p, n))
69979+ return 0;
69980+ } else if (c == '/') {
69981+ while (*n != '\0' && *n != '/')
69982+ ++n;
69983+ if (*n == '/' && !glob_match(p, n + 1))
69984+ return 0;
69985+ } else {
69986+ for (--p; n < endp; ++n)
69987+ if (*n == c && !glob_match(p, n))
69988+ return 0;
69989+ }
69990+
69991+ return 1;
69992+ }
69993+ case '[':
69994+ {
69995+ int not;
69996+ char cold;
69997+
69998+ if (*n == '\0' || *n == '/')
69999+ return 1;
70000+
70001+ not = (*p == '!' || *p == '^');
70002+ if (not)
70003+ ++p;
70004+
70005+ c = *p++;
70006+ for (;;) {
70007+ unsigned char fn = (unsigned char)*n;
70008+
70009+ if (c == '\0')
70010+ return 1;
70011+ else {
70012+ if (c == fn)
70013+ goto matched;
70014+ cold = c;
70015+ c = *p++;
70016+
70017+ if (c == '-' && *p != ']') {
70018+ unsigned char cend = *p++;
70019+
70020+ if (cend == '\0')
70021+ return 1;
70022+
70023+ if (cold <= fn && fn <= cend)
70024+ goto matched;
70025+
70026+ c = *p++;
70027+ }
70028+ }
70029+
70030+ if (c == ']')
70031+ break;
70032+ }
70033+ if (!not)
70034+ return 1;
70035+ break;
70036+ matched:
70037+ while (c != ']') {
70038+ if (c == '\0')
70039+ return 1;
70040+
70041+ c = *p++;
70042+ }
70043+ if (not)
70044+ return 1;
70045+ }
70046+ break;
70047+ default:
70048+ if (c != *n)
70049+ return 1;
70050+ }
70051+
70052+ ++n;
70053+ }
70054+
70055+ if (*n == '\0')
70056+ return 0;
70057+
70058+ if (*n == '/')
70059+ return 0;
70060+
70061+ return 1;
70062+}
70063+
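
A usage sketch for the glob_match() above (0 means match, 1 means no match), assuming the function is pasted into a standalone file; note from the code that a trailing '*' consumes the rest of the path, while '*' and '?' elsewhere do not cross '/' boundaries:

    #include <stdio.h>

    int main(void)
    {
        printf("%d\n", glob_match("/bin/*", "/bin/ls"));            /* 0 */
        printf("%d\n", glob_match("/bin/*.sh", "/bin/sub/run.sh")); /* 1 */
        printf("%d\n", glob_match("/home/*/.ssh",
                                  "/home/alice/.ssh"));             /* 0 */
        return 0;
    }
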
70064+static struct acl_object_label *
70065+chk_glob_label(struct acl_object_label *globbed,
70066+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70067+{
70068+ struct acl_object_label *tmp;
70069+
70070+ if (*path == NULL)
70071+ *path = gr_to_filename_nolock(dentry, mnt);
70072+
70073+ tmp = globbed;
70074+
70075+ while (tmp) {
70076+ if (!glob_match(tmp->filename, *path))
70077+ return tmp;
70078+ tmp = tmp->next;
70079+ }
70080+
70081+ return NULL;
70082+}
70083+
70084+static struct acl_object_label *
70085+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70086+ const u64 curr_ino, const dev_t curr_dev,
70087+ const struct acl_subject_label *subj, char **path, const int checkglob)
70088+{
70089+ struct acl_subject_label *tmpsubj;
70090+ struct acl_object_label *retval;
70091+ struct acl_object_label *retval2;
70092+
70093+ tmpsubj = (struct acl_subject_label *) subj;
70094+ read_lock(&gr_inode_lock);
70095+ do {
70096+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70097+ if (retval) {
70098+ if (checkglob && retval->globbed) {
70099+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70100+ if (retval2)
70101+ retval = retval2;
70102+ }
70103+ break;
70104+ }
70105+ } while ((tmpsubj = tmpsubj->parent_subject));
70106+ read_unlock(&gr_inode_lock);
70107+
70108+ return retval;
70109+}
70110+
70111+static __inline__ struct acl_object_label *
70112+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70113+ struct dentry *curr_dentry,
70114+ const struct acl_subject_label *subj, char **path, const int checkglob)
70115+{
70116+ int newglob = checkglob;
70117+ u64 inode;
70118+ dev_t device;
70119+
70120+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70121+ as we don't want a / * rule to match instead of the / object
70122+ don't do this for create lookups that call this function though, since they're looking up
70123+ on the parent and thus need globbing checks on all paths
70124+ */
70125+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70126+ newglob = GR_NO_GLOB;
70127+
70128+ spin_lock(&curr_dentry->d_lock);
70129+ inode = __get_ino(curr_dentry);
70130+ device = __get_dev(curr_dentry);
70131+ spin_unlock(&curr_dentry->d_lock);
70132+
70133+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70134+}
70135+
70136+#ifdef CONFIG_HUGETLBFS
70137+static inline bool
70138+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70139+{
70140+ int i;
70141+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70142+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70143+ return true;
70144+ }
70145+
70146+ return false;
70147+}
70148+#endif
70149+
70150+static struct acl_object_label *
70151+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70152+ const struct acl_subject_label *subj, char *path, const int checkglob)
70153+{
70154+ struct dentry *dentry = (struct dentry *) l_dentry;
70155+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70156+ struct mount *real_mnt = real_mount(mnt);
70157+ struct acl_object_label *retval;
70158+ struct dentry *parent;
70159+
70160+ read_seqlock_excl(&mount_lock);
70161+ write_seqlock(&rename_lock);
70162+
70163+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70164+#ifdef CONFIG_NET
70165+ mnt == sock_mnt ||
70166+#endif
70167+#ifdef CONFIG_HUGETLBFS
70168+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70169+#endif
70170+ /* ignore Eric Biederman */
70171+ IS_PRIVATE(l_dentry->d_inode))) {
70172+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70173+ goto out;
70174+ }
70175+
70176+ for (;;) {
70177+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70178+ break;
70179+
70180+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70181+ if (!mnt_has_parent(real_mnt))
70182+ break;
70183+
70184+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70185+ if (retval != NULL)
70186+ goto out;
70187+
70188+ dentry = real_mnt->mnt_mountpoint;
70189+ real_mnt = real_mnt->mnt_parent;
70190+ mnt = &real_mnt->mnt;
70191+ continue;
70192+ }
70193+
70194+ parent = dentry->d_parent;
70195+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70196+ if (retval != NULL)
70197+ goto out;
70198+
70199+ dentry = parent;
70200+ }
70201+
70202+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70203+
70204+ /* gr_real_root is pinned so we don't have to hold a reference */
70205+ if (retval == NULL)
70206+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70207+out:
70208+ write_sequnlock(&rename_lock);
70209+ read_sequnlock_excl(&mount_lock);
70210+
70211+ BUG_ON(retval == NULL);
70212+
70213+ return retval;
70214+}
70215+
70216+static __inline__ struct acl_object_label *
70217+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70218+ const struct acl_subject_label *subj)
70219+{
70220+ char *path = NULL;
70221+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70222+}
70223+
70224+static __inline__ struct acl_object_label *
70225+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70226+ const struct acl_subject_label *subj)
70227+{
70228+ char *path = NULL;
70229+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70230+}
70231+
70232+static __inline__ struct acl_object_label *
70233+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70234+ const struct acl_subject_label *subj, char *path)
70235+{
70236+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70237+}
70238+
70239+struct acl_subject_label *
70240+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70241+ const struct acl_role_label *role)
70242+{
70243+ struct dentry *dentry = (struct dentry *) l_dentry;
70244+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70245+ struct mount *real_mnt = real_mount(mnt);
70246+ struct acl_subject_label *retval;
70247+ struct dentry *parent;
70248+
70249+ read_seqlock_excl(&mount_lock);
70250+ write_seqlock(&rename_lock);
70251+
70252+ for (;;) {
70253+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70254+ break;
70255+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70256+ if (!mnt_has_parent(real_mnt))
70257+ break;
70258+
70259+ spin_lock(&dentry->d_lock);
70260+ read_lock(&gr_inode_lock);
70261+ retval =
70262+ lookup_acl_subj_label(__get_ino(dentry),
70263+ __get_dev(dentry), role);
70264+ read_unlock(&gr_inode_lock);
70265+ spin_unlock(&dentry->d_lock);
70266+ if (retval != NULL)
70267+ goto out;
70268+
70269+ dentry = real_mnt->mnt_mountpoint;
70270+ real_mnt = real_mnt->mnt_parent;
70271+ mnt = &real_mnt->mnt;
70272+ continue;
70273+ }
70274+
70275+ spin_lock(&dentry->d_lock);
70276+ read_lock(&gr_inode_lock);
70277+ retval = lookup_acl_subj_label(__get_ino(dentry),
70278+ __get_dev(dentry), role);
70279+ read_unlock(&gr_inode_lock);
70280+ parent = dentry->d_parent;
70281+ spin_unlock(&dentry->d_lock);
70282+
70283+ if (retval != NULL)
70284+ goto out;
70285+
70286+ dentry = parent;
70287+ }
70288+
70289+ spin_lock(&dentry->d_lock);
70290+ read_lock(&gr_inode_lock);
70291+ retval = lookup_acl_subj_label(__get_ino(dentry),
70292+ __get_dev(dentry), role);
70293+ read_unlock(&gr_inode_lock);
70294+ spin_unlock(&dentry->d_lock);
70295+
70296+ if (unlikely(retval == NULL)) {
70297+ /* gr_real_root is pinned, we don't need to hold a reference */
70298+ read_lock(&gr_inode_lock);
70299+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70300+ __get_dev(gr_real_root.dentry), role);
70301+ read_unlock(&gr_inode_lock);
70302+ }
70303+out:
70304+ write_sequnlock(&rename_lock);
70305+ read_sequnlock_excl(&mount_lock);
70306+
70307+ BUG_ON(retval == NULL);
70308+
70309+ return retval;
70310+}
70311+
70312+void
70313+assign_special_role(const char *rolename)
70314+{
70315+ struct acl_object_label *obj;
70316+ struct acl_role_label *r;
70317+ struct acl_role_label *assigned = NULL;
70318+ struct task_struct *tsk;
70319+ struct file *filp;
70320+
70321+ FOR_EACH_ROLE_START(r)
70322+ if (!strcmp(rolename, r->rolename) &&
70323+ (r->roletype & GR_ROLE_SPECIAL)) {
70324+ assigned = r;
70325+ break;
70326+ }
70327+ FOR_EACH_ROLE_END(r)
70328+
70329+ if (!assigned)
70330+ return;
70331+
70332+ read_lock(&tasklist_lock);
70333+ read_lock(&grsec_exec_file_lock);
70334+
70335+ tsk = current->real_parent;
70336+ if (tsk == NULL)
70337+ goto out_unlock;
70338+
70339+ filp = tsk->exec_file;
70340+ if (filp == NULL)
70341+ goto out_unlock;
70342+
70343+ tsk->is_writable = 0;
70344+ tsk->inherited = 0;
70345+
70346+ tsk->acl_sp_role = 1;
70347+ tsk->acl_role_id = ++acl_sp_role_value;
70348+ tsk->role = assigned;
70349+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70350+
70351+ /* ignore additional mmap checks for processes that are writable
70352+ by the default ACL */
70353+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70354+ if (unlikely(obj->mode & GR_WRITE))
70355+ tsk->is_writable = 1;
70356+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70357+ if (unlikely(obj->mode & GR_WRITE))
70358+ tsk->is_writable = 1;
70359+
70360+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70361+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70362+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70363+#endif
70364+
70365+out_unlock:
70366+ read_unlock(&grsec_exec_file_lock);
70367+ read_unlock(&tasklist_lock);
70368+ return;
70369+}
70370+
70371+
70372+static void
70373+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70374+{
70375+ struct task_struct *task = current;
70376+ const struct cred *cred = current_cred();
70377+
70378+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70379+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70380+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70381+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70382+
70383+ return;
70384+}
70385+
70386+static void
70387+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70388+{
70389+ struct task_struct *task = current;
70390+ const struct cred *cred = current_cred();
70391+
70392+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70393+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70394+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70395+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70396+
70397+ return;
70398+}
70399+
70400+static void
70401+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70402+{
70403+ struct task_struct *task = current;
70404+ const struct cred *cred = current_cred();
70405+
70406+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70407+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70408+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70409+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70410+
70411+ return;
70412+}
70413+
70414+static void
70415+gr_set_proc_res(struct task_struct *task)
70416+{
70417+ struct acl_subject_label *proc;
70418+ unsigned short i;
70419+
70420+ proc = task->acl;
70421+
70422+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70423+ return;
70424+
70425+ for (i = 0; i < RLIM_NLIMITS; i++) {
70426+ unsigned long rlim_cur, rlim_max;
70427+
70428+ if (!(proc->resmask & (1U << i)))
70429+ continue;
70430+
70431+ rlim_cur = proc->res[i].rlim_cur;
70432+ rlim_max = proc->res[i].rlim_max;
70433+
70434+ if (i == RLIMIT_NOFILE) {
70435+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70436+ if (rlim_cur > saved_sysctl_nr_open)
70437+ rlim_cur = saved_sysctl_nr_open;
70438+ if (rlim_max > saved_sysctl_nr_open)
70439+ rlim_max = saved_sysctl_nr_open;
70440+ }
70441+
70442+ task->signal->rlim[i].rlim_cur = rlim_cur;
70443+ task->signal->rlim[i].rlim_max = rlim_max;
70444+
70445+ if (i == RLIMIT_CPU)
70446+ update_rlimit_cpu(task, rlim_cur);
70447+ }
70448+
70449+ return;
70450+}
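/* Editorial sketch (not part of the patch): gr_set_proc_res() above gates each
 * rlimit slot on one bit of the subject's resmask and clamps RLIMIT_NOFILE to
 * a snapshot of sysctl_nr_open taken once per iteration.  A user-space model
 * of the gate-and-clamp pattern (all names hypothetical):
 */
#include <stdint.h>

#define NLIMITS 16

struct policy_limits {
	uint32_t resmask;		/* bit i set => slot i is policy-defined */
	unsigned long cur[NLIMITS], max[NLIMITS];
};

static void apply_limits(const struct policy_limits *pol,
			 unsigned long out_cur[NLIMITS], unsigned long out_max[NLIMITS],
			 int capped_slot, unsigned long cap)
{
	int i;

	for (i = 0; i < NLIMITS; i++) {
		unsigned long cur, max;

		if (!(pol->resmask & (1U << i)))
			continue;	/* slot not set in policy: leave untouched */

		cur = pol->cur[i];
		max = pol->max[i];
		if (i == capped_slot) {	/* clamp against the snapshotted ceiling */
			if (cur > cap)
				cur = cap;
			if (max > cap)
				max = cap;
		}
		out_cur[i] = cur;
		out_max[i] = max;
	}
}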
70451+
70452+/* both of the below must be called with
70453+ rcu_read_lock();
70454+ read_lock(&tasklist_lock);
70455+ read_lock(&grsec_exec_file_lock);
70456+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70457+*/
70458+
70459+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70460+{
70461+ char *tmpname;
70462+ struct acl_subject_label *tmpsubj;
70463+ struct file *filp;
70464+ struct name_entry *nmatch;
70465+
70466+ filp = task->exec_file;
70467+ if (filp == NULL)
70468+ return NULL;
70469+
70470+ /* the following applies the correct subject to
70471+ binaries that were already running when the
70472+ RBAC system was enabled, and to binaries
70473+ replaced or deleted since their execution
70474+ -----
70475+ when the RBAC system starts, the inode/dev
70476+ from exec_file will be one the RBAC system
70477+ is unaware of.  It only knows the inode/dev
70478+ of the file currently on disk, or the
70479+ absence of it.
70480+ */
70481+
70482+ if (filename)
70483+ nmatch = __lookup_name_entry(state, filename);
70484+ else {
70485+ preempt_disable();
70486+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70487+
70488+ nmatch = __lookup_name_entry(state, tmpname);
70489+ preempt_enable();
70490+ }
70491+ tmpsubj = NULL;
70492+ if (nmatch) {
70493+ if (nmatch->deleted)
70494+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70495+ else
70496+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70497+ }
70498+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70499+ then we fall back to a normal lookup based on the binary's ino/dev
70500+ */
70501+ if (tmpsubj == NULL && fallback)
70502+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70503+
70504+ return tmpsubj;
70505+}
70506+
70507+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70508+{
70509+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70510+}
70511+
70512+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70513+{
70514+ struct acl_object_label *obj;
70515+ struct file *filp;
70516+
70517+ filp = task->exec_file;
70518+
70519+ task->acl = subj;
70520+ task->is_writable = 0;
70521+ /* ignore additional mmap checks for processes that are writable
70522+ by the default ACL */
70523+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70524+ if (unlikely(obj->mode & GR_WRITE))
70525+ task->is_writable = 1;
70526+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70527+ if (unlikely(obj->mode & GR_WRITE))
70528+ task->is_writable = 1;
70529+
70530+ gr_set_proc_res(task);
70531+
70532+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70533+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70534+#endif
70535+}
70536+
70537+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70538+{
70539+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70540+}
70541+
70542+__u32
70543+gr_search_file(const struct dentry * dentry, const __u32 mode,
70544+ const struct vfsmount * mnt)
70545+{
70546+ __u32 retval = mode;
70547+ struct acl_subject_label *curracl;
70548+ struct acl_object_label *currobj;
70549+
70550+ if (unlikely(!(gr_status & GR_READY)))
70551+ return (mode & ~GR_AUDITS);
70552+
70553+ curracl = current->acl;
70554+
70555+ currobj = chk_obj_label(dentry, mnt, curracl);
70556+ retval = currobj->mode & mode;
70557+
70558+ /* if we're opening a specified transfer file for writing
70559+ (e.g. /dev/initctl), then transfer our role to init
70560+ */
70561+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70562+ current->role->roletype & GR_ROLE_PERSIST)) {
70563+ struct task_struct *task = init_pid_ns.child_reaper;
70564+
70565+ if (task->role != current->role) {
70566+ struct acl_subject_label *subj;
70567+
70568+ task->acl_sp_role = 0;
70569+ task->acl_role_id = current->acl_role_id;
70570+ task->role = current->role;
70571+ rcu_read_lock();
70572+ read_lock(&grsec_exec_file_lock);
70573+ subj = gr_get_subject_for_task(task, NULL, 1);
70574+ gr_apply_subject_to_task(task, subj);
70575+ read_unlock(&grsec_exec_file_lock);
70576+ rcu_read_unlock();
70577+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70578+ }
70579+ }
70580+
70581+ if (unlikely
70582+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70583+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70584+ __u32 new_mode = mode;
70585+
70586+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70587+
70588+ retval = new_mode;
70589+
70590+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70591+ new_mode |= GR_INHERIT;
70592+
70593+ if (!(mode & GR_NOLEARN))
70594+ gr_log_learn(dentry, mnt, new_mode);
70595+ }
70596+
70597+ return retval;
70598+}
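/* Editorial sketch (not part of the patch): at its core gr_search_file() is a
 * bitwise filter -- the caller passes the access bits it wants plus optional
 * audit/suppress bits, and gets back only the bits the matched object grants.
 * A request is fully satisfied when every non-audit/suppress bit came back.
 * User-space model with hypothetical bit names:
 */
#include <stdint.h>

#define ACC_READ	0x1u
#define ACC_WRITE	0x2u
#define ACC_AUDIT	0x4u	/* stands in for GR_AUDITS */
#define ACC_SUPPRESS	0x8u	/* stands in for GR_SUPPRESS */

static uint32_t search_mode(uint32_t granted, uint32_t requested)
{
	return granted & requested;	/* mirrors retval = currobj->mode & mode */
}

static int fully_granted(uint32_t result, uint32_t requested)
{
	/* mirrors the retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)) test */
	return result == (requested & ~(ACC_AUDIT | ACC_SUPPRESS));
}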
70599+
70600+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70601+ const struct dentry *parent,
70602+ const struct vfsmount *mnt)
70603+{
70604+ struct name_entry *match;
70605+ struct acl_object_label *matchpo;
70606+ struct acl_subject_label *curracl;
70607+ char *path;
70608+
70609+ if (unlikely(!(gr_status & GR_READY)))
70610+ return NULL;
70611+
70612+ preempt_disable();
70613+ path = gr_to_filename_rbac(new_dentry, mnt);
70614+ match = lookup_name_entry_create(path);
70615+
70616+ curracl = current->acl;
70617+
70618+ if (match) {
70619+ read_lock(&gr_inode_lock);
70620+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70621+ read_unlock(&gr_inode_lock);
70622+
70623+ if (matchpo) {
70624+ preempt_enable();
70625+ return matchpo;
70626+ }
70627+ }
70628+
70629+ // lookup parent
70630+
70631+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70632+
70633+ preempt_enable();
70634+ return matchpo;
70635+}
70636+
70637+__u32
70638+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70639+ const struct vfsmount * mnt, const __u32 mode)
70640+{
70641+ struct acl_object_label *matchpo;
70642+ __u32 retval;
70643+
70644+ if (unlikely(!(gr_status & GR_READY)))
70645+ return (mode & ~GR_AUDITS);
70646+
70647+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70648+
70649+ retval = matchpo->mode & mode;
70650+
70651+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70652+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70653+ __u32 new_mode = mode;
70654+
70655+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70656+
70657+ gr_log_learn(new_dentry, mnt, new_mode);
70658+ return new_mode;
70659+ }
70660+
70661+ return retval;
70662+}
70663+
70664+__u32
70665+gr_check_link(const struct dentry * new_dentry,
70666+ const struct dentry * parent_dentry,
70667+ const struct vfsmount * parent_mnt,
70668+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70669+{
70670+ struct acl_object_label *obj;
70671+ __u32 oldmode, newmode;
70672+ __u32 needmode;
70673+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70674+ GR_DELETE | GR_INHERIT;
70675+
70676+ if (unlikely(!(gr_status & GR_READY)))
70677+ return (GR_CREATE | GR_LINK);
70678+
70679+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70680+ oldmode = obj->mode;
70681+
70682+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70683+ newmode = obj->mode;
70684+
70685+ needmode = newmode & checkmodes;
70686+
70687+ // old name for hardlink must have at least the permissions of the new name
70688+ if ((oldmode & needmode) != needmode)
70689+ goto bad;
70690+
70691+ // if old name had restrictions/auditing, make sure the new name does as well
70692+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70693+
70694+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70695+ if (is_privileged_binary(old_dentry))
70696+ needmode |= GR_SETID;
70697+
70698+ if ((newmode & needmode) != needmode)
70699+ goto bad;
70700+
70701+ // enforce minimum permissions
70702+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70703+ return newmode;
70704+bad:
70705+ needmode = oldmode;
70706+ if (is_privileged_binary(old_dentry))
70707+ needmode |= GR_SETID;
70708+
70709+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70710+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70711+ return (GR_CREATE | GR_LINK);
70712+ } else if (newmode & GR_SUPPRESS)
70713+ return GR_SUPPRESS;
70714+ else
70715+ return 0;
70716+}
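/* Editorial sketch (not part of the patch): both hardlink checks above are
 * subset tests -- every required bit must already be present in the granted
 * set.  The idiom in isolation:
 */
#include <stdint.h>

static int has_all_bits(uint32_t have, uint32_t need)
{
	/* mirrors the (oldmode & needmode) != needmode "goto bad" tests */
	return (have & need) == need;
}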
70717+
70718+int
70719+gr_check_hidden_task(const struct task_struct *task)
70720+{
70721+ if (unlikely(!(gr_status & GR_READY)))
70722+ return 0;
70723+
70724+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70725+ return 1;
70726+
70727+ return 0;
70728+}
70729+
70730+int
70731+gr_check_protected_task(const struct task_struct *task)
70732+{
70733+ if (unlikely(!(gr_status & GR_READY) || !task))
70734+ return 0;
70735+
70736+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70737+ task->acl != current->acl)
70738+ return 1;
70739+
70740+ return 0;
70741+}
70742+
70743+int
70744+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70745+{
70746+ struct task_struct *p;
70747+ int ret = 0;
70748+
70749+ if (unlikely(!(gr_status & GR_READY) || !pid))
70750+ return ret;
70751+
70752+ read_lock(&tasklist_lock);
70753+ do_each_pid_task(pid, type, p) {
70754+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70755+ p->acl != current->acl) {
70756+ ret = 1;
70757+ goto out;
70758+ }
70759+ } while_each_pid_task(pid, type, p);
70760+out:
70761+ read_unlock(&tasklist_lock);
70762+
70763+ return ret;
70764+}
70765+
70766+void
70767+gr_copy_label(struct task_struct *tsk)
70768+{
70769+ struct task_struct *p = current;
70770+
70771+ tsk->inherited = p->inherited;
70772+ tsk->acl_sp_role = 0;
70773+ tsk->acl_role_id = p->acl_role_id;
70774+ tsk->acl = p->acl;
70775+ tsk->role = p->role;
70776+ tsk->signal->used_accept = 0;
70777+ tsk->signal->curr_ip = p->signal->curr_ip;
70778+ tsk->signal->saved_ip = p->signal->saved_ip;
70779+ if (p->exec_file)
70780+ get_file(p->exec_file);
70781+ tsk->exec_file = p->exec_file;
70782+ tsk->is_writable = p->is_writable;
70783+ if (unlikely(p->signal->used_accept)) {
70784+ p->signal->curr_ip = 0;
70785+ p->signal->saved_ip = 0;
70786+ }
70787+
70788+ return;
70789+}
70790+
70791+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70792+
70793+int
70794+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70795+{
70796+ unsigned int i;
70797+ __u16 num;
70798+ uid_t *uidlist;
70799+ uid_t curuid;
70800+ int realok = 0;
70801+ int effectiveok = 0;
70802+ int fsok = 0;
70803+ uid_t globalreal, globaleffective, globalfs;
70804+
70805+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70806+ struct user_struct *user;
70807+
70808+ if (!uid_valid(real))
70809+ goto skipit;
70810+
70811+ /* find user based on global namespace */
70812+
70813+ globalreal = GR_GLOBAL_UID(real);
70814+
70815+ user = find_user(make_kuid(&init_user_ns, globalreal));
70816+ if (user == NULL)
70817+ goto skipit;
70818+
70819+ if (gr_process_kernel_setuid_ban(user)) {
70820+ /* for find_user */
70821+ free_uid(user);
70822+ return 1;
70823+ }
70824+
70825+ /* for find_user */
70826+ free_uid(user);
70827+
70828+skipit:
70829+#endif
70830+
70831+ if (unlikely(!(gr_status & GR_READY)))
70832+ return 0;
70833+
70834+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70835+ gr_log_learn_uid_change(real, effective, fs);
70836+
70837+ num = current->acl->user_trans_num;
70838+ uidlist = current->acl->user_transitions;
70839+
70840+ if (uidlist == NULL)
70841+ return 0;
70842+
70843+ if (!uid_valid(real)) {
70844+ realok = 1;
70845+ globalreal = (uid_t)-1;
70846+ } else {
70847+ globalreal = GR_GLOBAL_UID(real);
70848+ }
70849+ if (!uid_valid(effective)) {
70850+ effectiveok = 1;
70851+ globaleffective = (uid_t)-1;
70852+ } else {
70853+ globaleffective = GR_GLOBAL_UID(effective);
70854+ }
70855+ if (!uid_valid(fs)) {
70856+ fsok = 1;
70857+ globalfs = (uid_t)-1;
70858+ } else {
70859+ globalfs = GR_GLOBAL_UID(fs);
70860+ }
70861+
70862+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70863+ for (i = 0; i < num; i++) {
70864+ curuid = uidlist[i];
70865+ if (globalreal == curuid)
70866+ realok = 1;
70867+ if (globaleffective == curuid)
70868+ effectiveok = 1;
70869+ if (globalfs == curuid)
70870+ fsok = 1;
70871+ }
70872+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70873+ for (i = 0; i < num; i++) {
70874+ curuid = uidlist[i];
70875+ if (globalreal == curuid)
70876+ break;
70877+ if (globaleffective == curuid)
70878+ break;
70879+ if (globalfs == curuid)
70880+ break;
70881+ }
70882+ /* not in deny list */
70883+ if (i == num) {
70884+ realok = 1;
70885+ effectiveok = 1;
70886+ fsok = 1;
70887+ }
70888+ }
70889+
70890+ if (realok && effectiveok && fsok)
70891+ return 0;
70892+ else {
70893+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70894+ return 1;
70895+ }
70896+}
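/* Editorial sketch (not part of the patch): the uid transition logic above is
 * an allow-list/deny-list pair over one array -- in allow mode an id passes
 * only if it appears in the list, in deny mode only if it does not.  Checking
 * the real, effective, and fs ids then reduces to three calls to a per-id
 * helper like this (hypothetical names):
 */
#include <stddef.h>

enum trans_type { TRANS_ALLOW, TRANS_DENY };

static int id_permitted(const unsigned int *list, size_t n,
			enum trans_type type, unsigned int id)
{
	size_t i;
	int found = 0;

	for (i = 0; i < n; i++)
		if (list[i] == id) {
			found = 1;
			break;
		}
	return type == TRANS_ALLOW ? found : !found;
}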
70897+
70898+int
70899+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70900+{
70901+ unsigned int i;
70902+ __u16 num;
70903+ gid_t *gidlist;
70904+ gid_t curgid;
70905+ int realok = 0;
70906+ int effectiveok = 0;
70907+ int fsok = 0;
70908+ gid_t globalreal, globaleffective, globalfs;
70909+
70910+ if (unlikely(!(gr_status & GR_READY)))
70911+ return 0;
70912+
70913+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70914+ gr_log_learn_gid_change(real, effective, fs);
70915+
70916+ num = current->acl->group_trans_num;
70917+ gidlist = current->acl->group_transitions;
70918+
70919+ if (gidlist == NULL)
70920+ return 0;
70921+
70922+ if (!gid_valid(real)) {
70923+ realok = 1;
70924+ globalreal = (gid_t)-1;
70925+ } else {
70926+ globalreal = GR_GLOBAL_GID(real);
70927+ }
70928+ if (!gid_valid(effective)) {
70929+ effectiveok = 1;
70930+ globaleffective = (gid_t)-1;
70931+ } else {
70932+ globaleffective = GR_GLOBAL_GID(effective);
70933+ }
70934+ if (!gid_valid(fs)) {
70935+ fsok = 1;
70936+ globalfs = (gid_t)-1;
70937+ } else {
70938+ globalfs = GR_GLOBAL_GID(fs);
70939+ }
70940+
70941+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
70942+ for (i = 0; i < num; i++) {
70943+ curgid = gidlist[i];
70944+ if (globalreal == curgid)
70945+ realok = 1;
70946+ if (globaleffective == curgid)
70947+ effectiveok = 1;
70948+ if (globalfs == curgid)
70949+ fsok = 1;
70950+ }
70951+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
70952+ for (i = 0; i < num; i++) {
70953+ curgid = gidlist[i];
70954+ if (globalreal == curgid)
70955+ break;
70956+ if (globaleffective == curgid)
70957+ break;
70958+ if (globalfs == curgid)
70959+ break;
70960+ }
70961+ /* not in deny list */
70962+ if (i == num) {
70963+ realok = 1;
70964+ effectiveok = 1;
70965+ fsok = 1;
70966+ }
70967+ }
70968+
70969+ if (realok && effectiveok && fsok)
70970+ return 0;
70971+ else {
70972+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70973+ return 1;
70974+ }
70975+}
70976+
70977+extern int gr_acl_is_capable(const int cap);
70978+
70979+void
70980+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
70981+{
70982+ struct acl_role_label *role = task->role;
70983+ struct acl_role_label *origrole = role;
70984+ struct acl_subject_label *subj = NULL;
70985+ struct acl_object_label *obj;
70986+ struct file *filp;
70987+ uid_t uid;
70988+ gid_t gid;
70989+
70990+ if (unlikely(!(gr_status & GR_READY)))
70991+ return;
70992+
70993+ uid = GR_GLOBAL_UID(kuid);
70994+ gid = GR_GLOBAL_GID(kgid);
70995+
70996+ filp = task->exec_file;
70997+
70998+ /* kernel process, we'll give them the kernel role */
70999+ if (unlikely(!filp)) {
71000+ task->role = running_polstate.kernel_role;
71001+ task->acl = running_polstate.kernel_role->root_label;
71002+ return;
71003+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71004+ /* save the current ip at time of role lookup so that the proper
71005+ IP will be learned for role_allowed_ip */
71006+ task->signal->saved_ip = task->signal->curr_ip;
71007+ role = lookup_acl_role_label(task, uid, gid);
71008+ }
71009+
71010+ /* don't change the role if we're not a privileged process */
71011+ if (role && task->role != role &&
71012+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71013+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71014+ return;
71015+
71016+ task->role = role;
71017+
71018+ if (task->inherited) {
71019+ /* if we reached our subject through inheritance, then first see
71020+ if there's a subject of the same name in the new role that has
71021+ an object that would result in the same inherited subject
71022+ */
71023+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
71024+ if (subj) {
71025+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
71026+ if (!(obj->mode & GR_INHERIT))
71027+ subj = NULL;
71028+ }
71029+
71030+ }
71031+ if (subj == NULL) {
71032+ /* otherwise:
71033+ perform subject lookup in possibly new role
71034+ we can use this result below in the case where role == task->role
71035+ */
71036+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71037+ }
71038+
71039+ /* if we changed uid/gid but ended up with the same role
71040+ and are using inheritance, don't lose the inherited
71041+ subject: if the current subject differs from what a
71042+ normal lookup would produce, we arrived at it via
71043+ inheritance, so keep it
71044+ */
71045+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
71046+ (subj == task->acl)))
71047+ task->acl = subj;
71048+
71049+ /* leave task->inherited unaffected */
71050+
71051+ task->is_writable = 0;
71052+
71053+ /* ignore additional mmap checks for processes that are writable
71054+ by the default ACL */
71055+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71056+ if (unlikely(obj->mode & GR_WRITE))
71057+ task->is_writable = 1;
71058+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71059+ if (unlikely(obj->mode & GR_WRITE))
71060+ task->is_writable = 1;
71061+
71062+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71063+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71064+#endif
71065+
71066+ gr_set_proc_res(task);
71067+
71068+ return;
71069+}
71070+
71071+int
71072+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71073+ const int unsafe_flags)
71074+{
71075+ struct task_struct *task = current;
71076+ struct acl_subject_label *newacl;
71077+ struct acl_object_label *obj;
71078+ __u32 retmode;
71079+
71080+ if (unlikely(!(gr_status & GR_READY)))
71081+ return 0;
71082+
71083+ newacl = chk_subj_label(dentry, mnt, task->role);
71084+
71085+ /* special handling for the case where an admin role ran strace -f -p <pid>
71086+ and that pid then performed an exec
71087+ */
71088+ rcu_read_lock();
71089+ read_lock(&tasklist_lock);
71090+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71091+ (task->parent->acl->mode & GR_POVERRIDE))) {
71092+ read_unlock(&tasklist_lock);
71093+ rcu_read_unlock();
71094+ goto skip_check;
71095+ }
71096+ read_unlock(&tasklist_lock);
71097+ rcu_read_unlock();
71098+
71099+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71100+ !(task->role->roletype & GR_ROLE_GOD) &&
71101+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71102+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71103+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71104+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71105+ else
71106+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71107+ return -EACCES;
71108+ }
71109+
71110+skip_check:
71111+
71112+ obj = chk_obj_label(dentry, mnt, task->acl);
71113+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71114+
71115+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71116+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71117+ if (obj->nested)
71118+ task->acl = obj->nested;
71119+ else
71120+ task->acl = newacl;
71121+ task->inherited = 0;
71122+ } else {
71123+ task->inherited = 1;
71124+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71125+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71126+ }
71127+
71128+ task->is_writable = 0;
71129+
71130+ /* ignore additional mmap checks for processes that are writable
71131+ by the default ACL */
71132+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71133+ if (unlikely(obj->mode & GR_WRITE))
71134+ task->is_writable = 1;
71135+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71136+ if (unlikely(obj->mode & GR_WRITE))
71137+ task->is_writable = 1;
71138+
71139+ gr_set_proc_res(task);
71140+
71141+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71142+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71143+#endif
71144+ return 0;
71145+}
71146+
71147+/* always called with valid inodev ptr */
71148+static void
71149+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71150+{
71151+ struct acl_object_label *matchpo;
71152+ struct acl_subject_label *matchps;
71153+ struct acl_subject_label *subj;
71154+ struct acl_role_label *role;
71155+ unsigned int x;
71156+
71157+ FOR_EACH_ROLE_START(role)
71158+ FOR_EACH_SUBJECT_START(role, subj, x)
71159+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71160+ matchpo->mode |= GR_DELETED;
71161+ FOR_EACH_SUBJECT_END(subj,x)
71162+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71163+ /* nested subjects aren't in the role's subj_hash table */
71164+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71165+ matchpo->mode |= GR_DELETED;
71166+ FOR_EACH_NESTED_SUBJECT_END(subj)
71167+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71168+ matchps->mode |= GR_DELETED;
71169+ FOR_EACH_ROLE_END(role)
71170+
71171+ inodev->nentry->deleted = 1;
71172+
71173+ return;
71174+}
71175+
71176+void
71177+gr_handle_delete(const u64 ino, const dev_t dev)
71178+{
71179+ struct inodev_entry *inodev;
71180+
71181+ if (unlikely(!(gr_status & GR_READY)))
71182+ return;
71183+
71184+ write_lock(&gr_inode_lock);
71185+ inodev = lookup_inodev_entry(ino, dev);
71186+ if (inodev != NULL)
71187+ do_handle_delete(inodev, ino, dev);
71188+ write_unlock(&gr_inode_lock);
71189+
71190+ return;
71191+}
71192+
71193+static void
71194+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71195+ const u64 newinode, const dev_t newdevice,
71196+ struct acl_subject_label *subj)
71197+{
71198+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71199+ struct acl_object_label *match;
71200+
71201+ match = subj->obj_hash[index];
71202+
71203+ while (match && (match->inode != oldinode ||
71204+ match->device != olddevice ||
71205+ !(match->mode & GR_DELETED)))
71206+ match = match->next;
71207+
71208+ if (match && (match->inode == oldinode)
71209+ && (match->device == olddevice)
71210+ && (match->mode & GR_DELETED)) {
71211+ if (match->prev == NULL) {
71212+ subj->obj_hash[index] = match->next;
71213+ if (match->next != NULL)
71214+ match->next->prev = NULL;
71215+ } else {
71216+ match->prev->next = match->next;
71217+ if (match->next != NULL)
71218+ match->next->prev = match->prev;
71219+ }
71220+ match->prev = NULL;
71221+ match->next = NULL;
71222+ match->inode = newinode;
71223+ match->device = newdevice;
71224+ match->mode &= ~GR_DELETED;
71225+
71226+ insert_acl_obj_label(match, subj);
71227+ }
71228+
71229+ return;
71230+}
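/* Editorial sketch (not part of the patch): update_acl_obj_label() above is a
 * doubly-linked hash-chain splice -- unlink the node from its old bucket,
 * rewrite its key, clear GR_DELETED, and re-insert under the new key.  The
 * unlink step in isolation (hypothetical node type):
 */
#include <stddef.h>

struct hnode {
	struct hnode *prev, *next;
};

static void chain_unlink(struct hnode **bucket, struct hnode *n)
{
	if (n->prev == NULL) {		/* node is the bucket head */
		*bucket = n->next;
		if (n->next)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;
	}
	n->prev = n->next = NULL;	/* fully detach before re-insert */
}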
71231+
71232+static void
71233+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71234+ const u64 newinode, const dev_t newdevice,
71235+ struct acl_role_label *role)
71236+{
71237+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71238+ struct acl_subject_label *match;
71239+
71240+ match = role->subj_hash[index];
71241+
71242+ while (match && (match->inode != oldinode ||
71243+ match->device != olddevice ||
71244+ !(match->mode & GR_DELETED)))
71245+ match = match->next;
71246+
71247+ if (match && (match->inode == oldinode)
71248+ && (match->device == olddevice)
71249+ && (match->mode & GR_DELETED)) {
71250+ if (match->prev == NULL) {
71251+ role->subj_hash[index] = match->next;
71252+ if (match->next != NULL)
71253+ match->next->prev = NULL;
71254+ } else {
71255+ match->prev->next = match->next;
71256+ if (match->next != NULL)
71257+ match->next->prev = match->prev;
71258+ }
71259+ match->prev = NULL;
71260+ match->next = NULL;
71261+ match->inode = newinode;
71262+ match->device = newdevice;
71263+ match->mode &= ~GR_DELETED;
71264+
71265+ insert_acl_subj_label(match, role);
71266+ }
71267+
71268+ return;
71269+}
71270+
71271+static void
71272+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71273+ const u64 newinode, const dev_t newdevice)
71274+{
71275+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71276+ struct inodev_entry *match;
71277+
71278+ match = running_polstate.inodev_set.i_hash[index];
71279+
71280+ while (match && (match->nentry->inode != oldinode ||
71281+ match->nentry->device != olddevice || !match->nentry->deleted))
71282+ match = match->next;
71283+
71284+ if (match && (match->nentry->inode == oldinode)
71285+ && (match->nentry->device == olddevice) &&
71286+ match->nentry->deleted) {
71287+ if (match->prev == NULL) {
71288+ running_polstate.inodev_set.i_hash[index] = match->next;
71289+ if (match->next != NULL)
71290+ match->next->prev = NULL;
71291+ } else {
71292+ match->prev->next = match->next;
71293+ if (match->next != NULL)
71294+ match->next->prev = match->prev;
71295+ }
71296+ match->prev = NULL;
71297+ match->next = NULL;
71298+ match->nentry->inode = newinode;
71299+ match->nentry->device = newdevice;
71300+ match->nentry->deleted = 0;
71301+
71302+ insert_inodev_entry(match);
71303+ }
71304+
71305+ return;
71306+}
71307+
71308+static void
71309+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71310+{
71311+ struct acl_subject_label *subj;
71312+ struct acl_role_label *role;
71313+ unsigned int x;
71314+
71315+ FOR_EACH_ROLE_START(role)
71316+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71317+
71318+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71319+ if ((subj->inode == ino) && (subj->device == dev)) {
71320+ subj->inode = ino;
71321+ subj->device = dev;
71322+ }
71323+ /* nested subjects aren't in the role's subj_hash table */
71324+ update_acl_obj_label(matchn->inode, matchn->device,
71325+ ino, dev, subj);
71326+ FOR_EACH_NESTED_SUBJECT_END(subj)
71327+ FOR_EACH_SUBJECT_START(role, subj, x)
71328+ update_acl_obj_label(matchn->inode, matchn->device,
71329+ ino, dev, subj);
71330+ FOR_EACH_SUBJECT_END(subj,x)
71331+ FOR_EACH_ROLE_END(role)
71332+
71333+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71334+
71335+ return;
71336+}
71337+
71338+static void
71339+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71340+ const struct vfsmount *mnt)
71341+{
71342+ u64 ino = __get_ino(dentry);
71343+ dev_t dev = __get_dev(dentry);
71344+
71345+ __do_handle_create(matchn, ino, dev);
71346+
71347+ return;
71348+}
71349+
71350+void
71351+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71352+{
71353+ struct name_entry *matchn;
71354+
71355+ if (unlikely(!(gr_status & GR_READY)))
71356+ return;
71357+
71358+ preempt_disable();
71359+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71360+
71361+ if (unlikely((unsigned long)matchn)) {
71362+ write_lock(&gr_inode_lock);
71363+ do_handle_create(matchn, dentry, mnt);
71364+ write_unlock(&gr_inode_lock);
71365+ }
71366+ preempt_enable();
71367+
71368+ return;
71369+}
71370+
71371+void
71372+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71373+{
71374+ struct name_entry *matchn;
71375+
71376+ if (unlikely(!(gr_status & GR_READY)))
71377+ return;
71378+
71379+ preempt_disable();
71380+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71381+
71382+ if (unlikely((unsigned long)matchn)) {
71383+ write_lock(&gr_inode_lock);
71384+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71385+ write_unlock(&gr_inode_lock);
71386+ }
71387+ preempt_enable();
71388+
71389+ return;
71390+}
71391+
71392+void
71393+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71394+ struct dentry *old_dentry,
71395+ struct dentry *new_dentry,
71396+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71397+{
71398+ struct name_entry *matchn;
71399+ struct name_entry *matchn2 = NULL;
71400+ struct inodev_entry *inodev;
71401+ struct inode *inode = new_dentry->d_inode;
71402+ u64 old_ino = __get_ino(old_dentry);
71403+ dev_t old_dev = __get_dev(old_dentry);
71404+ unsigned int exchange = flags & RENAME_EXCHANGE;
71405+
71406+ /* vfs_rename swaps the name and parent link for old_dentry and
71407+ new_dentry
71408+ at this point, old_dentry has the new name, parent link, and inode
71409+ for the renamed file
71410+ if a file is being replaced by a rename, new_dentry has the inode
71411+ and name for the replaced file
71412+ */
71413+
71414+ if (unlikely(!(gr_status & GR_READY)))
71415+ return;
71416+
71417+ preempt_disable();
71418+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71419+
71420+ /* exchange cases:
71421+ a filename exists for the source, but not dest
71422+ do a recreate on source
71423+ a filename exists for the dest, but not source
71424+ do a recreate on dest
71425+ a filename exists for both source and dest
71426+ delete source and dest, then create source and dest
71427+ a filename exists for neither source nor dest
71428+ no updates needed
71429+
71430+ the name entry lookups get us the old inode/dev associated with
71431+ each name, so do the deletes first (if possible) so that when
71432+ we do the create, we pick up on the right entries
71433+ */
71434+
71435+ if (exchange)
71436+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71437+
71438+ /* we wouldn't have to check d_inode if it weren't for
71439+ NFS silly-renaming
71440+ */
71441+
71442+ write_lock(&gr_inode_lock);
71443+ if (unlikely((replace || exchange) && inode)) {
71444+ u64 new_ino = __get_ino(new_dentry);
71445+ dev_t new_dev = __get_dev(new_dentry);
71446+
71447+ inodev = lookup_inodev_entry(new_ino, new_dev);
71448+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71449+ do_handle_delete(inodev, new_ino, new_dev);
71450+ }
71451+
71452+ inodev = lookup_inodev_entry(old_ino, old_dev);
71453+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71454+ do_handle_delete(inodev, old_ino, old_dev);
71455+
71456+ if (unlikely(matchn != NULL))
71457+ do_handle_create(matchn, old_dentry, mnt);
71458+
71459+ if (unlikely(matchn2 != NULL))
71460+ do_handle_create(matchn2, new_dentry, mnt);
71461+
71462+ write_unlock(&gr_inode_lock);
71463+ preempt_enable();
71464+
71465+ return;
71466+}
71467+
71468+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71469+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71470+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71471+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71472+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71473+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71474+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71475+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71476+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71477+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71478+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71479+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71480+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71481+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71482+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71483+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71484+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71485+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71486+};
71487+
71488+void
71489+gr_learn_resource(const struct task_struct *task,
71490+ const int res, const unsigned long wanted, const int gt)
71491+{
71492+ struct acl_subject_label *acl;
71493+ const struct cred *cred;
71494+
71495+ if (unlikely((gr_status & GR_READY) &&
71496+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71497+ goto skip_reslog;
71498+
71499+ gr_log_resource(task, res, wanted, gt);
71500+skip_reslog:
71501+
71502+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71503+ return;
71504+
71505+ acl = task->acl;
71506+
71507+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71508+ !(acl->resmask & (1U << (unsigned short) res))))
71509+ return;
71510+
71511+ if (wanted >= acl->res[res].rlim_cur) {
71512+ unsigned long res_add;
71513+
71514+ res_add = wanted + res_learn_bumps[res];
71515+
71516+ acl->res[res].rlim_cur = res_add;
71517+
71518+ if (wanted > acl->res[res].rlim_max)
71519+ acl->res[res].rlim_max = res_add;
71520+
71521+ /* only log the subject filename, since resource logging is supported for
71522+ single-subject learning only */
71523+ rcu_read_lock();
71524+ cred = __task_cred(task);
71525+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71526+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71527+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71528+ "", (unsigned long) res, &task->signal->saved_ip);
71529+ rcu_read_unlock();
71530+ }
71531+
71532+ return;
71533+}
71534+EXPORT_SYMBOL_GPL(gr_learn_resource);
71535+#endif
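/* Editorial sketch (not part of the patch): gr_learn_resource() grows a learned
 * limit to the wanted value plus a per-resource headroom "bump", so repeated
 * small overruns don't each trigger another adjustment.  A user-space model:
 */
struct learned_lim {
	unsigned long cur, max;
};

static void learn_bump(struct learned_lim *l, unsigned long wanted,
		       unsigned long bump)
{
	if (wanted < l->cur)
		return;			/* already covered by the learned limit */
	l->cur = wanted + bump;		/* soft limit gets headroom */
	if (wanted > l->max)
		l->max = wanted + bump;	/* hard limit raised the same way */
}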
71536+
71537+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71538+void
71539+pax_set_initial_flags(struct linux_binprm *bprm)
71540+{
71541+ struct task_struct *task = current;
71542+ struct acl_subject_label *proc;
71543+ unsigned long flags;
71544+
71545+ if (unlikely(!(gr_status & GR_READY)))
71546+ return;
71547+
71548+ flags = pax_get_flags(task);
71549+
71550+ proc = task->acl;
71551+
71552+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71553+ flags &= ~MF_PAX_PAGEEXEC;
71554+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71555+ flags &= ~MF_PAX_SEGMEXEC;
71556+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71557+ flags &= ~MF_PAX_RANDMMAP;
71558+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71559+ flags &= ~MF_PAX_EMUTRAMP;
71560+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71561+ flags &= ~MF_PAX_MPROTECT;
71562+
71563+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71564+ flags |= MF_PAX_PAGEEXEC;
71565+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71566+ flags |= MF_PAX_SEGMEXEC;
71567+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71568+ flags |= MF_PAX_RANDMMAP;
71569+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71570+ flags |= MF_PAX_EMUTRAMP;
71571+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71572+ flags |= MF_PAX_MPROTECT;
71573+
71574+ pax_set_flags(task, flags);
71575+
71576+ return;
71577+}
71578+#endif
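/* Editorial sketch (not part of the patch): pax_set_initial_flags() applies the
 * subject's PaX override in two passes -- clear every explicitly disabled flag,
 * then set every explicitly enabled one, leaving unmentioned flags at their
 * inherited values.  The pattern in isolation:
 */
#include <stdint.h>

static uint32_t apply_override(uint32_t flags, uint32_t disable, uint32_t enable)
{
	flags &= ~disable;	/* explicit disables override inherited state */
	flags |= enable;	/* explicit enables are applied last */
	return flags;
}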
71579+
71580+int
71581+gr_handle_proc_ptrace(struct task_struct *task)
71582+{
71583+ struct file *filp;
71584+ struct task_struct *tmp = task;
71585+ struct task_struct *curtemp = current;
71586+ __u32 retmode;
71587+
71588+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71589+ if (unlikely(!(gr_status & GR_READY)))
71590+ return 0;
71591+#endif
71592+
71593+ read_lock(&tasklist_lock);
71594+ read_lock(&grsec_exec_file_lock);
71595+ filp = task->exec_file;
71596+
71597+ while (task_pid_nr(tmp) > 0) {
71598+ if (tmp == curtemp)
71599+ break;
71600+ tmp = tmp->real_parent;
71601+ }
71602+
71603+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71604+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71605+ read_unlock(&grsec_exec_file_lock);
71606+ read_unlock(&tasklist_lock);
71607+ return 1;
71608+ }
71609+
71610+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71611+ if (!(gr_status & GR_READY)) {
71612+ read_unlock(&grsec_exec_file_lock);
71613+ read_unlock(&tasklist_lock);
71614+ return 0;
71615+ }
71616+#endif
71617+
71618+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71619+ read_unlock(&grsec_exec_file_lock);
71620+ read_unlock(&tasklist_lock);
71621+
71622+ if (retmode & GR_NOPTRACE)
71623+ return 1;
71624+
71625+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71626+ && (current->acl != task->acl || (current->acl != current->role->root_label
71627+ && task_pid_nr(current) != task_pid_nr(task))))
71628+ return 1;
71629+
71630+ return 0;
71631+}
71632+
71633+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71634+{
71635+ if (unlikely(!(gr_status & GR_READY)))
71636+ return;
71637+
71638+ if (!(current->role->roletype & GR_ROLE_GOD))
71639+ return;
71640+
71641+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71642+ p->role->rolename, gr_task_roletype_to_char(p),
71643+ p->acl->filename);
71644+}
71645+
71646+int
71647+gr_handle_ptrace(struct task_struct *task, const long request)
71648+{
71649+ struct task_struct *tmp = task;
71650+ struct task_struct *curtemp = current;
71651+ __u32 retmode;
71652+
71653+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71654+ if (unlikely(!(gr_status & GR_READY)))
71655+ return 0;
71656+#endif
71657+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71658+ read_lock(&tasklist_lock);
71659+ while (task_pid_nr(tmp) > 0) {
71660+ if (tmp == curtemp)
71661+ break;
71662+ tmp = tmp->real_parent;
71663+ }
71664+
71665+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71666+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71667+ read_unlock(&tasklist_lock);
71668+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71669+ return 1;
71670+ }
71671+ read_unlock(&tasklist_lock);
71672+ }
71673+
71674+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71675+ if (!(gr_status & GR_READY))
71676+ return 0;
71677+#endif
71678+
71679+ read_lock(&grsec_exec_file_lock);
71680+ if (unlikely(!task->exec_file)) {
71681+ read_unlock(&grsec_exec_file_lock);
71682+ return 0;
71683+ }
71684+
71685+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71686+ read_unlock(&grsec_exec_file_lock);
71687+
71688+ if (retmode & GR_NOPTRACE) {
71689+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71690+ return 1;
71691+ }
71692+
71693+ if (retmode & GR_PTRACERD) {
71694+ switch (request) {
71695+ case PTRACE_SEIZE:
71696+ case PTRACE_POKETEXT:
71697+ case PTRACE_POKEDATA:
71698+ case PTRACE_POKEUSR:
71699+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71700+ case PTRACE_SETREGS:
71701+ case PTRACE_SETFPREGS:
71702+#endif
71703+#ifdef CONFIG_X86
71704+ case PTRACE_SETFPXREGS:
71705+#endif
71706+#ifdef CONFIG_ALTIVEC
71707+ case PTRACE_SETVRREGS:
71708+#endif
71709+ return 1;
71710+ default:
71711+ return 0;
71712+ }
71713+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71714+ !(current->role->roletype & GR_ROLE_GOD) &&
71715+ (current->acl != task->acl)) {
71716+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71717+ return 1;
71718+ }
71719+
71720+ return 0;
71721+}
71722+
71723+static int is_writable_mmap(const struct file *filp)
71724+{
71725+ struct task_struct *task = current;
71726+ struct acl_object_label *obj, *obj2;
71727+
71728+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71729+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71730+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71731+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71732+ task->role->root_label);
71733+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71734+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71735+ return 1;
71736+ }
71737+ }
71738+ return 0;
71739+}
71740+
71741+int
71742+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71743+{
71744+ __u32 mode;
71745+
71746+ if (unlikely(!file || !(prot & PROT_EXEC)))
71747+ return 1;
71748+
71749+ if (is_writable_mmap(file))
71750+ return 0;
71751+
71752+ mode =
71753+ gr_search_file(file->f_path.dentry,
71754+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71755+ file->f_path.mnt);
71756+
71757+ if (!gr_tpe_allow(file))
71758+ return 0;
71759+
71760+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71761+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71762+ return 0;
71763+ } else if (unlikely(!(mode & GR_EXEC))) {
71764+ return 0;
71765+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71766+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71767+ return 1;
71768+ }
71769+
71770+ return 1;
71771+}
71772+
71773+int
71774+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71775+{
71776+ __u32 mode;
71777+
71778+ if (unlikely(!file || !(prot & PROT_EXEC)))
71779+ return 1;
71780+
71781+ if (is_writable_mmap(file))
71782+ return 0;
71783+
71784+ mode =
71785+ gr_search_file(file->f_path.dentry,
71786+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71787+ file->f_path.mnt);
71788+
71789+ if (!gr_tpe_allow(file))
71790+ return 0;
71791+
71792+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71793+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71794+ return 0;
71795+ } else if (unlikely(!(mode & GR_EXEC))) {
71796+ return 0;
71797+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71798+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71799+ return 1;
71800+ }
71801+
71802+ return 1;
71803+}
71804+
71805+void
71806+gr_acl_handle_psacct(struct task_struct *task, const long code)
71807+{
71808+ unsigned long runtime, cputime;
71809+ cputime_t utime, stime;
71810+ unsigned int wday, cday;
71811+ __u8 whr, chr;
71812+ __u8 wmin, cmin;
71813+ __u8 wsec, csec;
71814+ struct timespec curtime, starttime;
71815+
71816+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71817+ !(task->acl->mode & GR_PROCACCT)))
71818+ return;
71819+
71820+ curtime = ns_to_timespec(ktime_get_ns());
71821+ starttime = ns_to_timespec(task->start_time);
71822+ runtime = curtime.tv_sec - starttime.tv_sec;
71823+ wday = runtime / (60 * 60 * 24);
71824+ runtime -= wday * (60 * 60 * 24);
71825+ whr = runtime / (60 * 60);
71826+ runtime -= whr * (60 * 60);
71827+ wmin = runtime / 60;
71828+ runtime -= wmin * 60;
71829+ wsec = runtime;
71830+
71831+ task_cputime(task, &utime, &stime);
71832+ cputime = cputime_to_secs(utime + stime);
71833+ cday = cputime / (60 * 60 * 24);
71834+ cputime -= cday * (60 * 60 * 24);
71835+ chr = cputime / (60 * 60);
71836+ cputime -= chr * (60 * 60);
71837+ cmin = cputime / 60;
71838+ cputime -= cmin * 60;
71839+ csec = cputime;
71840+
71841+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71842+
71843+ return;
71844+}
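/* Editorial sketch (not part of the patch): the wall-clock and CPU-time
 * breakdowns above are the same repeated div/mod by day, hour, and minute.
 * The decomposition as a reusable helper:
 */
struct dhms {
	unsigned int d, h, m, s;
};

static struct dhms split_seconds(unsigned long t)
{
	struct dhms r;

	r.d = t / (60 * 60 * 24);
	t -= (unsigned long)r.d * (60 * 60 * 24);
	r.h = t / (60 * 60);
	t -= r.h * (60 * 60);
	r.m = t / 60;
	t -= r.m * 60;
	r.s = t;
	return r;
}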
71845+
71846+#ifdef CONFIG_TASKSTATS
71847+int gr_is_taskstats_denied(int pid)
71848+{
71849+ struct task_struct *task;
71850+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71851+ const struct cred *cred;
71852+#endif
71853+ int ret = 0;
71854+
71855+ /* restrict taskstats viewing to un-chrooted root users
71856+ who have the 'view' subject flag if the RBAC system is enabled
71857+ */
71858+
71859+ rcu_read_lock();
71860+ read_lock(&tasklist_lock);
71861+ task = find_task_by_vpid(pid);
71862+ if (task) {
71863+#ifdef CONFIG_GRKERNSEC_CHROOT
71864+ if (proc_is_chrooted(task))
71865+ ret = -EACCES;
71866+#endif
71867+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71868+ cred = __task_cred(task);
71869+#ifdef CONFIG_GRKERNSEC_PROC_USER
71870+ if (gr_is_global_nonroot(cred->uid))
71871+ ret = -EACCES;
71872+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71873+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71874+ ret = -EACCES;
71875+#endif
71876+#endif
71877+ if (gr_status & GR_READY) {
71878+ if (!(task->acl->mode & GR_VIEW))
71879+ ret = -EACCES;
71880+ }
71881+ } else
71882+ ret = -ENOENT;
71883+
71884+ read_unlock(&tasklist_lock);
71885+ rcu_read_unlock();
71886+
71887+ return ret;
71888+}
71889+#endif
71890+
71891+/* AUXV entries are filled via a descendant of search_binary_handler
71892+ after we've already applied the subject for the target
71893+*/
71894+int gr_acl_enable_at_secure(void)
71895+{
71896+ if (unlikely(!(gr_status & GR_READY)))
71897+ return 0;
71898+
71899+ if (current->acl->mode & GR_ATSECURE)
71900+ return 1;
71901+
71902+ return 0;
71903+}
71904+
71905+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71906+{
71907+ struct task_struct *task = current;
71908+ struct dentry *dentry = file->f_path.dentry;
71909+ struct vfsmount *mnt = file->f_path.mnt;
71910+ struct acl_object_label *obj, *tmp;
71911+ struct acl_subject_label *subj;
71912+ unsigned int bufsize;
71913+ int is_not_root;
71914+ char *path;
71915+ dev_t dev = __get_dev(dentry);
71916+
71917+ if (unlikely(!(gr_status & GR_READY)))
71918+ return 1;
71919+
71920+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71921+ return 1;
71922+
71923+ /* ignore Eric Biederman */
71924+ if (IS_PRIVATE(dentry->d_inode))
71925+ return 1;
71926+
71927+ subj = task->acl;
71928+ read_lock(&gr_inode_lock);
71929+ do {
71930+ obj = lookup_acl_obj_label(ino, dev, subj);
71931+ if (obj != NULL) {
71932+ read_unlock(&gr_inode_lock);
71933+ return (obj->mode & GR_FIND) ? 1 : 0;
71934+ }
71935+ } while ((subj = subj->parent_subject));
71936+ read_unlock(&gr_inode_lock);
71937+
71938+ /* this is purely an optimization since we're looking for an object
71939+ for the directory we're doing a readdir on
71940+ if it's possible for any globbed object to match the entry we're
71941+ filling into the directory, then the object we find here will be
71942+ an anchor point with attached globbed objects
71943+ */
71944+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
71945+ if (obj->globbed == NULL)
71946+ return (obj->mode & GR_FIND) ? 1 : 0;
71947+
71948+ is_not_root = ((obj->filename[0] == '/') &&
71949+ (obj->filename[1] == '\0')) ? 0 : 1;
71950+ bufsize = PAGE_SIZE - namelen - is_not_root;
71951+
71952+ /* unsigned wraparound check: true if bufsize > PAGE_SIZE or bufsize == 0 */
71953+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
71954+ return 1;
71955+
71956+ preempt_disable();
71957+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71958+ bufsize);
71959+
71960+ bufsize = strlen(path);
71961+
71962+ /* if base is "/", don't append an additional slash */
71963+ if (is_not_root)
71964+ *(path + bufsize) = '/';
71965+ memcpy(path + bufsize + is_not_root, name, namelen);
71966+ *(path + bufsize + namelen + is_not_root) = '\0';
71967+
71968+ tmp = obj->globbed;
71969+ while (tmp) {
71970+ if (!glob_match(tmp->filename, path)) {
71971+ preempt_enable();
71972+ return (tmp->mode & GR_FIND) ? 1 : 0;
71973+ }
71974+ tmp = tmp->next;
71975+ }
71976+ preempt_enable();
71977+ return (obj->mode & GR_FIND) ? 1 : 0;
71978+}
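/* Editorial sketch (not part of the patch): the (bufsize - 1) > (PAGE_SIZE - 1)
 * test above folds two comparisons into one via unsigned wraparound: when
 * bufsize is 0, bufsize - 1 wraps to the maximum unsigned value, which also
 * exceeds PAGE_SIZE - 1.  In isolation:
 */
static int out_of_range(unsigned int len, unsigned int limit)
{
	/* true when len == 0 (wraps) or len > limit */
	return (len - 1) > (limit - 1);
}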
71979+
71980+void gr_put_exec_file(struct task_struct *task)
71981+{
71982+ struct file *filp;
71983+
71984+ write_lock(&grsec_exec_file_lock);
71985+ filp = task->exec_file;
71986+ task->exec_file = NULL;
71987+ write_unlock(&grsec_exec_file_lock);
71988+
71989+ if (filp)
71990+ fput(filp);
71991+
71992+ return;
71993+}
71994+
71995+
71996+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
71997+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
71998+#endif
71999+#ifdef CONFIG_SECURITY
72000+EXPORT_SYMBOL_GPL(gr_check_user_change);
72001+EXPORT_SYMBOL_GPL(gr_check_group_change);
72002+#endif
72003+
72004diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72005new file mode 100644
72006index 0000000..18ffbbd
72007--- /dev/null
72008+++ b/grsecurity/gracl_alloc.c
72009@@ -0,0 +1,105 @@
72010+#include <linux/kernel.h>
72011+#include <linux/mm.h>
72012+#include <linux/slab.h>
72013+#include <linux/vmalloc.h>
72014+#include <linux/gracl.h>
72015+#include <linux/grsecurity.h>
72016+
72017+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72018+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72019+
72020+static __inline__ int
72021+alloc_pop(void)
72022+{
72023+ if (current_alloc_state->alloc_stack_next == 1)
72024+ return 0;
72025+
72026+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72027+
72028+ current_alloc_state->alloc_stack_next--;
72029+
72030+ return 1;
72031+}
72032+
72033+static __inline__ int
72034+alloc_push(void *buf)
72035+{
72036+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72037+ return 1;
72038+
72039+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72040+
72041+ current_alloc_state->alloc_stack_next++;
72042+
72043+ return 0;
72044+}
72045+
72046+void *
72047+acl_alloc(unsigned long len)
72048+{
72049+ void *ret = NULL;
72050+
72051+ if (!len || len > PAGE_SIZE)
72052+ goto out;
72053+
72054+ ret = kmalloc(len, GFP_KERNEL);
72055+
72056+ if (ret) {
72057+ if (alloc_push(ret)) {
72058+ kfree(ret);
72059+ ret = NULL;
72060+ }
72061+ }
72062+
72063+out:
72064+ return ret;
72065+}
72066+
72067+void *
72068+acl_alloc_num(unsigned long num, unsigned long len)
72069+{
72070+ if (!len || (num > (PAGE_SIZE / len)))
72071+ return NULL;
72072+
72073+ return acl_alloc(num * len);
72074+}
72075+
72076+void
72077+acl_free_all(void)
72078+{
72079+ if (!current_alloc_state->alloc_stack)
72080+ return;
72081+
72082+ while (alloc_pop()) ;
72083+
72084+ if (current_alloc_state->alloc_stack) {
72085+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72086+ kfree(current_alloc_state->alloc_stack);
72087+ else
72088+ vfree(current_alloc_state->alloc_stack);
72089+ }
72090+
72091+ current_alloc_state->alloc_stack = NULL;
72092+ current_alloc_state->alloc_stack_size = 1;
72093+ current_alloc_state->alloc_stack_next = 1;
72094+
72095+ return;
72096+}
72097+
72098+int
72099+acl_alloc_stack_init(unsigned long size)
72100+{
72101+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72102+ current_alloc_state->alloc_stack =
72103+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72104+ else
72105+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72106+
72107+ current_alloc_state->alloc_stack_size = size;
72108+ current_alloc_state->alloc_stack_next = 1;
72109+
72110+ if (!current_alloc_state->alloc_stack)
72111+ return 0;
72112+ else
72113+ return 1;
72114+}
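/* Editorial sketch (not part of the patch): the intended lifecycle of the
 * allocator above, inferred only from the definitions in this file -- note the
 * inverted convention that acl_alloc_stack_init() returns 1 on success and 0
 * on failure, and that acl_free_all() releases every tracked allocation plus
 * the stack itself in one call:
 */
static int example_policy_load(void)
{
	void *a, *b;

	if (!acl_alloc_stack_init(256))	/* room to track 256 allocations */
		return -1;

	a = acl_alloc(128);		/* each successful alloc is pushed */
	b = acl_alloc_num(4, 32);	/* 4 * 32 bytes, overflow-checked */
	if (a == NULL || b == NULL) {
		acl_free_all();		/* frees whatever was allocated, and the stack */
		return -1;
	}

	/* ... populate policy structures in a and b ... */

	acl_free_all();			/* single teardown for everything */
	return 0;
}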
72115diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72116new file mode 100644
72117index 0000000..1a94c11
72118--- /dev/null
72119+++ b/grsecurity/gracl_cap.c
72120@@ -0,0 +1,127 @@
72121+#include <linux/kernel.h>
72122+#include <linux/module.h>
72123+#include <linux/sched.h>
72124+#include <linux/gracl.h>
72125+#include <linux/grsecurity.h>
72126+#include <linux/grinternal.h>
72127+
72128+extern const char *captab_log[];
72129+extern int captab_log_entries;
72130+
72131+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72132+{
72133+ struct acl_subject_label *curracl;
72134+
72135+ if (!gr_acl_is_enabled())
72136+ return 1;
72137+
72138+ curracl = task->acl;
72139+
72140+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72141+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72142+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72143+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72144+ gr_to_filename(task->exec_file->f_path.dentry,
72145+ task->exec_file->f_path.mnt) : curracl->filename,
72146+ curracl->filename, 0UL,
72147+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72148+ return 1;
72149+ }
72150+
72151+ return 0;
72152+}
72153+
72154+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72155+{
72156+ struct acl_subject_label *curracl;
72157+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72158+ kernel_cap_t cap_audit = __cap_empty_set;
72159+
72160+ if (!gr_acl_is_enabled())
72161+ return 1;
72162+
72163+ curracl = task->acl;
72164+
72165+ cap_drop = curracl->cap_lower;
72166+ cap_mask = curracl->cap_mask;
72167+ cap_audit = curracl->cap_invert_audit;
72168+
72169+ while ((curracl = curracl->parent_subject)) {
72170+ /* if the cap isn't specified in the current computed mask but is specified in
72171+ the current level subject, add the current level subject's mask bit to the
72172+ computed mask; if that level also lowers the cap, add it to the set of
72173+ dropped capabilities as well
72174+ */
72175+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72176+ cap_raise(cap_mask, cap);
72177+ if (cap_raised(curracl->cap_lower, cap))
72178+ cap_raise(cap_drop, cap);
72179+ if (cap_raised(curracl->cap_invert_audit, cap))
72180+ cap_raise(cap_audit, cap);
72181+ }
72182+ }
72183+
72184+ if (!cap_raised(cap_drop, cap)) {
72185+ if (cap_raised(cap_audit, cap))
72186+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72187+ return 1;
72188+ }
72189+
72190+ /* only learn the capability use if the process has the capability in the
72191+	   general case; the two uses in sys.c of gr_learn_cap are an exception
72192+ to this rule to ensure any role transition involves what the full-learned
72193+ policy believes in a privileged process
72194+ */
72195+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72196+ return 1;
72197+
72198+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72199+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72200+
72201+ return 0;
72202+}
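+
+/* Editor's sketch (illustrative, not part of the patch): how the
+ * parent-subject walk above composes capability state for one cap.
+ * Suppose the child leaves CAP_NET_ADMIN unspecified and its parent
+ * specifies and lowers it:
+ *
+ *   if (!cap_raised(cap_mask, CAP_NET_ADMIN) &&
+ *       cap_raised(parent->cap_mask, CAP_NET_ADMIN)) {
+ *           cap_raise(cap_mask, CAP_NET_ADMIN);         // now "specified"
+ *           if (cap_raised(parent->cap_lower, CAP_NET_ADMIN))
+ *                   cap_raise(cap_drop, CAP_NET_ADMIN); // -> denied below
+ *   }
+ *
+ * Once a cap is raised in cap_mask, deeper ancestors can no longer
+ * override it: the nearest subject that specifies a cap wins.
+ */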
72203+
72204+int
72205+gr_acl_is_capable(const int cap)
72206+{
72207+ return gr_task_acl_is_capable(current, current_cred(), cap);
72208+}
72209+
72210+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72211+{
72212+ struct acl_subject_label *curracl;
72213+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72214+
72215+ if (!gr_acl_is_enabled())
72216+ return 1;
72217+
72218+ curracl = task->acl;
72219+
72220+ cap_drop = curracl->cap_lower;
72221+ cap_mask = curracl->cap_mask;
72222+
72223+ while ((curracl = curracl->parent_subject)) {
72224+ /* if the cap isn't specified in the current computed mask but is specified in the
72225+ current level subject, and is lowered in the current level subject, then add
72226+	   it to the set of dropped capabilities;
72227+ otherwise, add the current level subject's mask to the current computed mask
72228+ */
72229+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72230+ cap_raise(cap_mask, cap);
72231+ if (cap_raised(curracl->cap_lower, cap))
72232+ cap_raise(cap_drop, cap);
72233+ }
72234+ }
72235+
72236+ if (!cap_raised(cap_drop, cap))
72237+ return 1;
72238+
72239+ return 0;
72240+}
72241+
72242+int
72243+gr_acl_is_capable_nolog(const int cap)
72244+{
72245+ return gr_task_acl_is_capable_nolog(current, cap);
72246+}
72247+
72248diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72249new file mode 100644
72250index 0000000..a43dd06
72251--- /dev/null
72252+++ b/grsecurity/gracl_compat.c
72253@@ -0,0 +1,269 @@
72254+#include <linux/kernel.h>
72255+#include <linux/gracl.h>
72256+#include <linux/compat.h>
72257+#include <linux/gracl_compat.h>
72258+
72259+#include <asm/uaccess.h>
72260+
72261+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72262+{
72263+ struct gr_arg_wrapper_compat uwrapcompat;
72264+
72265+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72266+ return -EFAULT;
72267+
72268+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72269+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72270+ return -EINVAL;
72271+
72272+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72273+ uwrap->version = uwrapcompat.version;
72274+ uwrap->size = sizeof(struct gr_arg);
72275+
72276+ return 0;
72277+}
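+
+/* Editor's note (illustrative; the compat struct layout shown is an
+ * assumption, declared in gracl_compat.h): each *_compat helper reads the
+ * 32-bit userland layout and widens user pointers with compat_ptr(), e.g.
+ *
+ *   struct gr_arg_wrapper_compat {
+ *       compat_uptr_t arg;     // 32-bit user pointer
+ *       u32 version;
+ *       u32 size;
+ *   };
+ *
+ * uwrap->size is rewritten to sizeof(struct gr_arg) so the rest of the
+ * loader works with native-sized structures only.
+ */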
72278+
72279+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72280+{
72281+ struct gr_arg_compat argcompat;
72282+
72283+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72284+ return -EFAULT;
72285+
72286+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72287+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72288+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72289+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72290+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72291+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72292+
72293+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72294+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72295+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72296+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72297+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72298+ arg->segv_device = argcompat.segv_device;
72299+ arg->segv_inode = argcompat.segv_inode;
72300+ arg->segv_uid = argcompat.segv_uid;
72301+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72302+ arg->mode = argcompat.mode;
72303+
72304+ return 0;
72305+}
72306+
72307+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72308+{
72309+ struct acl_object_label_compat objcompat;
72310+
72311+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72312+ return -EFAULT;
72313+
72314+ obj->filename = compat_ptr(objcompat.filename);
72315+ obj->inode = objcompat.inode;
72316+ obj->device = objcompat.device;
72317+ obj->mode = objcompat.mode;
72318+
72319+ obj->nested = compat_ptr(objcompat.nested);
72320+ obj->globbed = compat_ptr(objcompat.globbed);
72321+
72322+ obj->prev = compat_ptr(objcompat.prev);
72323+ obj->next = compat_ptr(objcompat.next);
72324+
72325+ return 0;
72326+}
72327+
72328+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72329+{
72330+ unsigned int i;
72331+ struct acl_subject_label_compat subjcompat;
72332+
72333+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72334+ return -EFAULT;
72335+
72336+ subj->filename = compat_ptr(subjcompat.filename);
72337+ subj->inode = subjcompat.inode;
72338+ subj->device = subjcompat.device;
72339+ subj->mode = subjcompat.mode;
72340+ subj->cap_mask = subjcompat.cap_mask;
72341+ subj->cap_lower = subjcompat.cap_lower;
72342+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72343+
72344+ for (i = 0; i < GR_NLIMITS; i++) {
72345+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72346+ subj->res[i].rlim_cur = RLIM_INFINITY;
72347+ else
72348+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72349+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72350+ subj->res[i].rlim_max = RLIM_INFINITY;
72351+ else
72352+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72353+ }
72354+ subj->resmask = subjcompat.resmask;
72355+
72356+ subj->user_trans_type = subjcompat.user_trans_type;
72357+ subj->group_trans_type = subjcompat.group_trans_type;
72358+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72359+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72360+ subj->user_trans_num = subjcompat.user_trans_num;
72361+ subj->group_trans_num = subjcompat.group_trans_num;
72362+
72363+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72364+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72365+ subj->ip_type = subjcompat.ip_type;
72366+ subj->ips = compat_ptr(subjcompat.ips);
72367+ subj->ip_num = subjcompat.ip_num;
72368+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72369+
72370+ subj->crashes = subjcompat.crashes;
72371+ subj->expires = subjcompat.expires;
72372+
72373+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72374+ subj->hash = compat_ptr(subjcompat.hash);
72375+ subj->prev = compat_ptr(subjcompat.prev);
72376+ subj->next = compat_ptr(subjcompat.next);
72377+
72378+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72379+ subj->obj_hash_size = subjcompat.obj_hash_size;
72380+ subj->pax_flags = subjcompat.pax_flags;
72381+
72382+ return 0;
72383+}
72384+
72385+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72386+{
72387+ struct acl_role_label_compat rolecompat;
72388+
72389+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72390+ return -EFAULT;
72391+
72392+ role->rolename = compat_ptr(rolecompat.rolename);
72393+ role->uidgid = rolecompat.uidgid;
72394+ role->roletype = rolecompat.roletype;
72395+
72396+ role->auth_attempts = rolecompat.auth_attempts;
72397+ role->expires = rolecompat.expires;
72398+
72399+ role->root_label = compat_ptr(rolecompat.root_label);
72400+ role->hash = compat_ptr(rolecompat.hash);
72401+
72402+ role->prev = compat_ptr(rolecompat.prev);
72403+ role->next = compat_ptr(rolecompat.next);
72404+
72405+ role->transitions = compat_ptr(rolecompat.transitions);
72406+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72407+ role->domain_children = compat_ptr(rolecompat.domain_children);
72408+ role->domain_child_num = rolecompat.domain_child_num;
72409+
72410+ role->umask = rolecompat.umask;
72411+
72412+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72413+ role->subj_hash_size = rolecompat.subj_hash_size;
72414+
72415+ return 0;
72416+}
72417+
72418+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72419+{
72420+ struct role_allowed_ip_compat roleip_compat;
72421+
72422+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72423+ return -EFAULT;
72424+
72425+ roleip->addr = roleip_compat.addr;
72426+ roleip->netmask = roleip_compat.netmask;
72427+
72428+ roleip->prev = compat_ptr(roleip_compat.prev);
72429+ roleip->next = compat_ptr(roleip_compat.next);
72430+
72431+ return 0;
72432+}
72433+
72434+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72435+{
72436+ struct role_transition_compat trans_compat;
72437+
72438+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72439+ return -EFAULT;
72440+
72441+ trans->rolename = compat_ptr(trans_compat.rolename);
72442+
72443+ trans->prev = compat_ptr(trans_compat.prev);
72444+ trans->next = compat_ptr(trans_compat.next);
72445+
72446+ return 0;
72447+
72448+}
72449+
72450+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72451+{
72452+ struct gr_hash_struct_compat hash_compat;
72453+
72454+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72455+ return -EFAULT;
72456+
72457+ hash->table = compat_ptr(hash_compat.table);
72458+ hash->nametable = compat_ptr(hash_compat.nametable);
72459+ hash->first = compat_ptr(hash_compat.first);
72460+
72461+ hash->table_size = hash_compat.table_size;
72462+ hash->used_size = hash_compat.used_size;
72463+
72464+ hash->type = hash_compat.type;
72465+
72466+ return 0;
72467+}
72468+
72469+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72470+{
72471+ compat_uptr_t ptrcompat;
72472+
72473+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72474+ return -EFAULT;
72475+
72476+ *(void **)ptr = compat_ptr(ptrcompat);
72477+
72478+ return 0;
72479+}
72480+
72481+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72482+{
72483+ struct acl_ip_label_compat ip_compat;
72484+
72485+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72486+ return -EFAULT;
72487+
72488+ ip->iface = compat_ptr(ip_compat.iface);
72489+ ip->addr = ip_compat.addr;
72490+ ip->netmask = ip_compat.netmask;
72491+ ip->low = ip_compat.low;
72492+ ip->high = ip_compat.high;
72493+ ip->mode = ip_compat.mode;
72494+ ip->type = ip_compat.type;
72495+
72496+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72497+
72498+ ip->prev = compat_ptr(ip_compat.prev);
72499+ ip->next = compat_ptr(ip_compat.next);
72500+
72501+ return 0;
72502+}
72503+
72504+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72505+{
72506+ struct sprole_pw_compat pw_compat;
72507+
72508+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72509+ return -EFAULT;
72510+
72511+ pw->rolename = compat_ptr(pw_compat.rolename);
72512+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72513+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72514+
72515+ return 0;
72516+}
72517+
72518+size_t get_gr_arg_wrapper_size_compat(void)
72519+{
72520+ return sizeof(struct gr_arg_wrapper_compat);
72521+}
72522+
72523diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72524new file mode 100644
72525index 0000000..8ee8e4f
72526--- /dev/null
72527+++ b/grsecurity/gracl_fs.c
72528@@ -0,0 +1,447 @@
72529+#include <linux/kernel.h>
72530+#include <linux/sched.h>
72531+#include <linux/types.h>
72532+#include <linux/fs.h>
72533+#include <linux/file.h>
72534+#include <linux/stat.h>
72535+#include <linux/grsecurity.h>
72536+#include <linux/grinternal.h>
72537+#include <linux/gracl.h>
72538+
72539+umode_t
72540+gr_acl_umask(void)
72541+{
72542+ if (unlikely(!gr_acl_is_enabled()))
72543+ return 0;
72544+
72545+ return current->role->umask;
72546+}
72547+
72548+__u32
72549+gr_acl_handle_hidden_file(const struct dentry * dentry,
72550+ const struct vfsmount * mnt)
72551+{
72552+ __u32 mode;
72553+
72554+ if (unlikely(d_is_negative(dentry)))
72555+ return GR_FIND;
72556+
72557+ mode =
72558+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72559+
72560+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72561+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72562+ return mode;
72563+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72564+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72565+ return 0;
72566+ } else if (unlikely(!(mode & GR_FIND)))
72567+ return 0;
72568+
72569+ return GR_FIND;
72570+}
72571+
72572+__u32
72573+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72574+ int acc_mode)
72575+{
72576+ __u32 reqmode = GR_FIND;
72577+ __u32 mode;
72578+
72579+ if (unlikely(d_is_negative(dentry)))
72580+ return reqmode;
72581+
72582+ if (acc_mode & MAY_APPEND)
72583+ reqmode |= GR_APPEND;
72584+ else if (acc_mode & MAY_WRITE)
72585+ reqmode |= GR_WRITE;
72586+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72587+ reqmode |= GR_READ;
72588+
72589+ mode =
72590+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72591+ mnt);
72592+
72593+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72594+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72595+ reqmode & GR_READ ? " reading" : "",
72596+ reqmode & GR_WRITE ? " writing" : reqmode &
72597+ GR_APPEND ? " appending" : "");
72598+ return reqmode;
72599+ } else
72600+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72601+ {
72602+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72603+ reqmode & GR_READ ? " reading" : "",
72604+ reqmode & GR_WRITE ? " writing" : reqmode &
72605+ GR_APPEND ? " appending" : "");
72606+ return 0;
72607+ } else if (unlikely((mode & reqmode) != reqmode))
72608+ return 0;
72609+
72610+ return reqmode;
72611+}
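+
+/* Editor's summary (illustrative): the acc_mode -> reqmode mapping above is
+ *
+ *   MAY_APPEND             -> GR_FIND | GR_APPEND
+ *   MAY_WRITE (no append)  -> GR_FIND | GR_WRITE
+ *   MAY_READ on a non-dir  -> additionally GR_READ
+ *
+ * so an open(path, O_RDWR) on a regular file needs an object granting
+ * GR_FIND | GR_READ | GR_WRITE.
+ */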
72612+
72613+__u32
72614+gr_acl_handle_creat(const struct dentry * dentry,
72615+ const struct dentry * p_dentry,
72616+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72617+ const int imode)
72618+{
72619+ __u32 reqmode = GR_WRITE | GR_CREATE;
72620+ __u32 mode;
72621+
72622+ if (acc_mode & MAY_APPEND)
72623+ reqmode |= GR_APPEND;
72624+ // if a directory was required or the directory already exists, then
72625+ // don't count this open as a read
72626+ if ((acc_mode & MAY_READ) &&
72627+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72628+ reqmode |= GR_READ;
72629+ if ((open_flags & O_CREAT) &&
72630+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72631+ reqmode |= GR_SETID;
72632+
72633+ mode =
72634+ gr_check_create(dentry, p_dentry, p_mnt,
72635+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72636+
72637+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72638+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72639+ reqmode & GR_READ ? " reading" : "",
72640+ reqmode & GR_WRITE ? " writing" : reqmode &
72641+ GR_APPEND ? " appending" : "");
72642+ return reqmode;
72643+ } else
72644+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72645+ {
72646+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72647+ reqmode & GR_READ ? " reading" : "",
72648+ reqmode & GR_WRITE ? " writing" : reqmode &
72649+ GR_APPEND ? " appending" : "");
72650+ return 0;
72651+ } else if (unlikely((mode & reqmode) != reqmode))
72652+ return 0;
72653+
72654+ return reqmode;
72655+}
72656+
72657+__u32
72658+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72659+ const int fmode)
72660+{
72661+ __u32 mode, reqmode = GR_FIND;
72662+
72663+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72664+ reqmode |= GR_EXEC;
72665+ if (fmode & S_IWOTH)
72666+ reqmode |= GR_WRITE;
72667+ if (fmode & S_IROTH)
72668+ reqmode |= GR_READ;
72669+
72670+ mode =
72671+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72672+ mnt);
72673+
72674+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72675+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72676+ reqmode & GR_READ ? " reading" : "",
72677+ reqmode & GR_WRITE ? " writing" : "",
72678+ reqmode & GR_EXEC ? " executing" : "");
72679+ return reqmode;
72680+ } else
72681+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72682+ {
72683+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72684+ reqmode & GR_READ ? " reading" : "",
72685+ reqmode & GR_WRITE ? " writing" : "",
72686+ reqmode & GR_EXEC ? " executing" : "");
72687+ return 0;
72688+ } else if (unlikely((mode & reqmode) != reqmode))
72689+ return 0;
72690+
72691+ return reqmode;
72692+}
72693+
72694+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72695+{
72696+ __u32 mode;
72697+
72698+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72699+
72700+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72701+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72702+ return mode;
72703+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72704+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72705+ return 0;
72706+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72707+ return 0;
72708+
72709+ return (reqmode);
72710+}
72711+
72712+__u32
72713+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72714+{
72715+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72716+}
72717+
72718+__u32
72719+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72720+{
72721+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72722+}
72723+
72724+__u32
72725+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72726+{
72727+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72728+}
72729+
72730+__u32
72731+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72732+{
72733+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72734+}
72735+
72736+__u32
72737+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72738+ umode_t *modeptr)
72739+{
72740+ umode_t mode;
72741+
72742+ *modeptr &= ~gr_acl_umask();
72743+ mode = *modeptr;
72744+
72745+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72746+ return 1;
72747+
72748+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72749+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72750+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72751+ GR_CHMOD_ACL_MSG);
72752+ } else {
72753+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72754+ }
72755+}
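+
+/* Editor's note: the setid test used here (and in the creat/mknod handlers)
+ * matches setuid, or setgid only together with group-execute, because
+ * S_ISGID without S_IXGRP marks mandatory locking rather than a setgid
+ * executable:
+ *
+ *   (mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
+ */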
72756+
72757+__u32
72758+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72759+{
72760+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72761+}
72762+
72763+__u32
72764+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72765+{
72766+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72767+}
72768+
72769+__u32
72770+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72771+{
72772+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72773+}
72774+
72775+__u32
72776+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72777+{
72778+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72779+}
72780+
72781+__u32
72782+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72783+{
72784+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72785+ GR_UNIXCONNECT_ACL_MSG);
72786+}
72787+
72788+/* hardlinks require at minimum create and link permission;
72789+ any additional privilege required is based on the
72790+ privilege of the file being linked to
72791+*/
72792+__u32
72793+gr_acl_handle_link(const struct dentry * new_dentry,
72794+ const struct dentry * parent_dentry,
72795+ const struct vfsmount * parent_mnt,
72796+ const struct dentry * old_dentry,
72797+ const struct vfsmount * old_mnt, const struct filename *to)
72798+{
72799+ __u32 mode;
72800+ __u32 needmode = GR_CREATE | GR_LINK;
72801+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72802+
72803+ mode =
72804+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72805+ old_mnt);
72806+
72807+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72808+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72809+ return mode;
72810+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72811+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72812+ return 0;
72813+ } else if (unlikely((mode & needmode) != needmode))
72814+ return 0;
72815+
72816+ return 1;
72817+}
72818+
72819+__u32
72820+gr_acl_handle_symlink(const struct dentry * new_dentry,
72821+ const struct dentry * parent_dentry,
72822+ const struct vfsmount * parent_mnt, const struct filename *from)
72823+{
72824+ __u32 needmode = GR_WRITE | GR_CREATE;
72825+ __u32 mode;
72826+
72827+ mode =
72828+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72829+ GR_CREATE | GR_AUDIT_CREATE |
72830+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72831+
72832+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72833+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72834+ return mode;
72835+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72836+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72837+ return 0;
72838+ } else if (unlikely((mode & needmode) != needmode))
72839+ return 0;
72840+
72841+ return (GR_WRITE | GR_CREATE);
72842+}
72843+
72844+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72845+{
72846+ __u32 mode;
72847+
72848+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72849+
72850+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72851+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72852+ return mode;
72853+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72854+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72855+ return 0;
72856+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72857+ return 0;
72858+
72859+ return (reqmode);
72860+}
72861+
72862+__u32
72863+gr_acl_handle_mknod(const struct dentry * new_dentry,
72864+ const struct dentry * parent_dentry,
72865+ const struct vfsmount * parent_mnt,
72866+ const int mode)
72867+{
72868+ __u32 reqmode = GR_WRITE | GR_CREATE;
72869+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72870+ reqmode |= GR_SETID;
72871+
72872+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72873+ reqmode, GR_MKNOD_ACL_MSG);
72874+}
72875+
72876+__u32
72877+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72878+ const struct dentry *parent_dentry,
72879+ const struct vfsmount *parent_mnt)
72880+{
72881+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72882+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72883+}
72884+
72885+#define RENAME_CHECK_SUCCESS(old, new) \
72886+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72887+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
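+
+/* Editor's example (illustrative): the macro is true only when both lookups
+ * granted read and write, e.g.
+ *
+ *   RENAME_CHECK_SUCCESS(GR_READ | GR_WRITE, GR_READ | GR_WRITE)  -> true
+ *   RENAME_CHECK_SUCCESS(GR_READ,            GR_READ | GR_WRITE)  -> false
+ */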
72888+
72889+int
72890+gr_acl_handle_rename(struct dentry *new_dentry,
72891+ struct dentry *parent_dentry,
72892+ const struct vfsmount *parent_mnt,
72893+ struct dentry *old_dentry,
72894+ struct inode *old_parent_inode,
72895+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72896+{
72897+ __u32 comp1, comp2;
72898+ int error = 0;
72899+
72900+ if (unlikely(!gr_acl_is_enabled()))
72901+ return 0;
72902+
72903+ if (flags & RENAME_EXCHANGE) {
72904+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72905+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72906+ GR_SUPPRESS, parent_mnt);
72907+ comp2 =
72908+ gr_search_file(old_dentry,
72909+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72910+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72911+ } else if (d_is_negative(new_dentry)) {
72912+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72913+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72914+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72915+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72916+ GR_DELETE | GR_AUDIT_DELETE |
72917+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72918+ GR_SUPPRESS, old_mnt);
72919+ } else {
72920+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72921+ GR_CREATE | GR_DELETE |
72922+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
72923+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72924+ GR_SUPPRESS, parent_mnt);
72925+ comp2 =
72926+ gr_search_file(old_dentry,
72927+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72928+ GR_DELETE | GR_AUDIT_DELETE |
72929+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72930+ }
72931+
72932+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
72933+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
72934+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72935+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
72936+ && !(comp2 & GR_SUPPRESS)) {
72937+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72938+ error = -EACCES;
72939+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
72940+ error = -EACCES;
72941+
72942+ return error;
72943+}
72944+
72945+void
72946+gr_acl_handle_exit(void)
72947+{
72948+ u16 id;
72949+ char *rolename;
72950+
72951+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
72952+ !(current->role->roletype & GR_ROLE_PERSIST))) {
72953+ id = current->acl_role_id;
72954+ rolename = current->role->rolename;
72955+ gr_set_acls(1);
72956+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
72957+ }
72958+
72959+ gr_put_exec_file(current);
72960+ return;
72961+}
72962+
72963+int
72964+gr_acl_handle_procpidmem(const struct task_struct *task)
72965+{
72966+ if (unlikely(!gr_acl_is_enabled()))
72967+ return 0;
72968+
72969+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
72970+ !(current->acl->mode & GR_POVERRIDE) &&
72971+ !(current->role->roletype & GR_ROLE_GOD))
72972+ return -EACCES;
72973+
72974+ return 0;
72975+}
72976diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
72977new file mode 100644
72978index 0000000..f056b81
72979--- /dev/null
72980+++ b/grsecurity/gracl_ip.c
72981@@ -0,0 +1,386 @@
72982+#include <linux/kernel.h>
72983+#include <asm/uaccess.h>
72984+#include <asm/errno.h>
72985+#include <net/sock.h>
72986+#include <linux/file.h>
72987+#include <linux/fs.h>
72988+#include <linux/net.h>
72989+#include <linux/in.h>
72990+#include <linux/skbuff.h>
72991+#include <linux/ip.h>
72992+#include <linux/udp.h>
72993+#include <linux/types.h>
72994+#include <linux/sched.h>
72995+#include <linux/netdevice.h>
72996+#include <linux/inetdevice.h>
72997+#include <linux/gracl.h>
72998+#include <linux/grsecurity.h>
72999+#include <linux/grinternal.h>
73000+
73001+#define GR_BIND 0x01
73002+#define GR_CONNECT 0x02
73003+#define GR_INVERT 0x04
73004+#define GR_BINDOVERRIDE 0x08
73005+#define GR_CONNECTOVERRIDE 0x10
73006+#define GR_SOCK_FAMILY 0x20
73007+
73008+static const char * gr_protocols[IPPROTO_MAX] = {
73009+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73010+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73011+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73012+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73013+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73014+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73015+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73016+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73017+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73018+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73019+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73020+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73021+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73022+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73023+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73024+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73025+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
73026+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73027+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73028+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73029+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73030+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73031+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73032+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73033+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73034+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73035+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73036+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73037+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73038+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73039+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73040+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73041+ };
73042+
73043+static const char * gr_socktypes[SOCK_MAX] = {
73044+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73045+ "unknown:7", "unknown:8", "unknown:9", "packet"
73046+ };
73047+
73048+static const char * gr_sockfamilies[AF_MAX+1] = {
73049+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73050+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73051+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
73052+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
73053+ };
73054+
73055+const char *
73056+gr_proto_to_name(unsigned char proto)
73057+{
73058+ return gr_protocols[proto];
73059+}
73060+
73061+const char *
73062+gr_socktype_to_name(unsigned char type)
73063+{
73064+ return gr_socktypes[type];
73065+}
73066+
73067+const char *
73068+gr_sockfamily_to_name(unsigned char family)
73069+{
73070+ return gr_sockfamilies[family];
73071+}
73072+
73073+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73074+
73075+int
73076+gr_search_socket(const int domain, const int type, const int protocol)
73077+{
73078+ struct acl_subject_label *curr;
73079+ const struct cred *cred = current_cred();
73080+
73081+ if (unlikely(!gr_acl_is_enabled()))
73082+ goto exit;
73083+
73084+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73085+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73086+ goto exit; // let the kernel handle it
73087+
73088+ curr = current->acl;
73089+
73090+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73091+		/* the family is allowed; if this is PF_INET, allow it only if
73092+		   the extra sock type/protocol checks pass */
73093+ if (domain == PF_INET)
73094+ goto inet_check;
73095+ goto exit;
73096+ } else {
73097+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73098+ __u32 fakeip = 0;
73099+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73100+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73101+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73102+ gr_to_filename(current->exec_file->f_path.dentry,
73103+ current->exec_file->f_path.mnt) :
73104+ curr->filename, curr->filename,
73105+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73106+ &current->signal->saved_ip);
73107+ goto exit;
73108+ }
73109+ goto exit_fail;
73110+ }
73111+
73112+inet_check:
73113+ /* the rest of this checking is for IPv4 only */
73114+ if (!curr->ips)
73115+ goto exit;
73116+
73117+ if ((curr->ip_type & (1U << type)) &&
73118+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73119+ goto exit;
73120+
73121+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73122+	/* we don't place acls on raw sockets, and sometimes
73123+ dgram/ip sockets are opened for ioctl and not
73124+ bind/connect, so we'll fake a bind learn log */
73125+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73126+ __u32 fakeip = 0;
73127+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73128+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73129+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73130+ gr_to_filename(current->exec_file->f_path.dentry,
73131+ current->exec_file->f_path.mnt) :
73132+ curr->filename, curr->filename,
73133+ &fakeip, 0, type,
73134+ protocol, GR_CONNECT, &current->signal->saved_ip);
73135+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73136+ __u32 fakeip = 0;
73137+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73138+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73139+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73140+ gr_to_filename(current->exec_file->f_path.dentry,
73141+ current->exec_file->f_path.mnt) :
73142+ curr->filename, curr->filename,
73143+ &fakeip, 0, type,
73144+ protocol, GR_BIND, &current->signal->saved_ip);
73145+ }
73146+ /* we'll log when they use connect or bind */
73147+ goto exit;
73148+ }
73149+
73150+exit_fail:
73151+ if (domain == PF_INET)
73152+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73153+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73154+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73155+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73156+ gr_socktype_to_name(type), protocol);
73157+
73158+ return 0;
73159+exit:
73160+ return 1;
73161+}
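+
+/* Editor's sketch (illustrative): families and IPv4 protocols are checked
+ * against per-subject bitmaps, socket types against a single 32-bit word.
+ * For socket(AF_INET, SOCK_STREAM, IPPROTO_TCP), i.e. (2, 1, 6):
+ *
+ *   curr->sock_families[2 / 32] & (1U << (2 % 32))   // AF_INET allowed?
+ *   curr->ip_type               & (1U << 1)          // SOCK_STREAM allowed?
+ *   curr->ip_proto[6 / 32]      & (1U << (6 % 32))   // IPPROTO_TCP allowed?
+ */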
73162+
73163+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73164+{
73165+ if ((ip->mode & mode) &&
73166+ (ip_port >= ip->low) &&
73167+ (ip_port <= ip->high) &&
73168+ ((ntohl(ip_addr) & our_netmask) ==
73169+ (ntohl(our_addr) & our_netmask))
73170+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73171+ && (ip->type & (1U << type))) {
73172+ if (ip->mode & GR_INVERT)
73173+ return 2; // specifically denied
73174+ else
73175+ return 1; // allowed
73176+ }
73177+
73178+ return 0; // not specifically allowed, may continue parsing
73179+}
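+
+/* Editor's sketch (illustrative): check_ip_policy() is a tri-state match, so
+ * callers must keep "no match" distinct from "explicit deny":
+ *
+ *   ret = check_ip_policy(ip, ip_addr, ip_port, proto, mode, type,
+ *                         our_addr, our_netmask);
+ *   if (ret == 1)
+ *           return 0;      // explicitly allowed
+ *   else if (ret == 2)
+ *           goto denied;   // GR_INVERT rule: explicitly denied
+ *   // ret == 0: keep scanning the subject's remaining acl_ip_labels
+ */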
73180+
73181+static int
73182+gr_search_connectbind(const int full_mode, struct sock *sk,
73183+ struct sockaddr_in *addr, const int type)
73184+{
73185+ char iface[IFNAMSIZ] = {0};
73186+ struct acl_subject_label *curr;
73187+ struct acl_ip_label *ip;
73188+ struct inet_sock *isk;
73189+ struct net_device *dev;
73190+ struct in_device *idev;
73191+ unsigned long i;
73192+ int ret;
73193+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73194+ __u32 ip_addr = 0;
73195+ __u32 our_addr;
73196+ __u32 our_netmask;
73197+ char *p;
73198+ __u16 ip_port = 0;
73199+ const struct cred *cred = current_cred();
73200+
73201+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73202+ return 0;
73203+
73204+ curr = current->acl;
73205+ isk = inet_sk(sk);
73206+
73207+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
73208+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73209+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73210+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73211+ struct sockaddr_in saddr;
73212+ int err;
73213+
73214+ saddr.sin_family = AF_INET;
73215+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73216+ saddr.sin_port = isk->inet_sport;
73217+
73218+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73219+ if (err)
73220+ return err;
73221+
73222+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73223+ if (err)
73224+ return err;
73225+ }
73226+
73227+ if (!curr->ips)
73228+ return 0;
73229+
73230+ ip_addr = addr->sin_addr.s_addr;
73231+ ip_port = ntohs(addr->sin_port);
73232+
73233+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73234+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73235+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73236+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73237+ gr_to_filename(current->exec_file->f_path.dentry,
73238+ current->exec_file->f_path.mnt) :
73239+ curr->filename, curr->filename,
73240+ &ip_addr, ip_port, type,
73241+ sk->sk_protocol, mode, &current->signal->saved_ip);
73242+ return 0;
73243+ }
73244+
73245+ for (i = 0; i < curr->ip_num; i++) {
73246+ ip = *(curr->ips + i);
73247+ if (ip->iface != NULL) {
73248+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73249+ p = strchr(iface, ':');
73250+ if (p != NULL)
73251+ *p = '\0';
73252+ dev = dev_get_by_name(sock_net(sk), iface);
73253+ if (dev == NULL)
73254+ continue;
73255+ idev = in_dev_get(dev);
73256+ if (idev == NULL) {
73257+ dev_put(dev);
73258+ continue;
73259+ }
73260+ rcu_read_lock();
73261+ for_ifa(idev) {
73262+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73263+ our_addr = ifa->ifa_address;
73264+ our_netmask = 0xffffffff;
73265+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73266+ if (ret == 1) {
73267+ rcu_read_unlock();
73268+ in_dev_put(idev);
73269+ dev_put(dev);
73270+ return 0;
73271+ } else if (ret == 2) {
73272+ rcu_read_unlock();
73273+ in_dev_put(idev);
73274+ dev_put(dev);
73275+ goto denied;
73276+ }
73277+ }
73278+ } endfor_ifa(idev);
73279+ rcu_read_unlock();
73280+ in_dev_put(idev);
73281+ dev_put(dev);
73282+ } else {
73283+ our_addr = ip->addr;
73284+ our_netmask = ip->netmask;
73285+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73286+ if (ret == 1)
73287+ return 0;
73288+ else if (ret == 2)
73289+ goto denied;
73290+ }
73291+ }
73292+
73293+denied:
73294+ if (mode == GR_BIND)
73295+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73296+ else if (mode == GR_CONNECT)
73297+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73298+
73299+ return -EACCES;
73300+}
73301+
73302+int
73303+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73304+{
73305+ /* always allow disconnection of dgram sockets with connect */
73306+ if (addr->sin_family == AF_UNSPEC)
73307+ return 0;
73308+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73309+}
73310+
73311+int
73312+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73313+{
73314+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73315+}
73316+
73317+int gr_search_listen(struct socket *sock)
73318+{
73319+ struct sock *sk = sock->sk;
73320+ struct sockaddr_in addr;
73321+
73322+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73323+ addr.sin_port = inet_sk(sk)->inet_sport;
73324+
73325+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73326+}
73327+
73328+int gr_search_accept(struct socket *sock)
73329+{
73330+ struct sock *sk = sock->sk;
73331+ struct sockaddr_in addr;
73332+
73333+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73334+ addr.sin_port = inet_sk(sk)->inet_sport;
73335+
73336+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73337+}
73338+
73339+int
73340+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73341+{
73342+ if (addr)
73343+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73344+ else {
73345+ struct sockaddr_in sin;
73346+ const struct inet_sock *inet = inet_sk(sk);
73347+
73348+ sin.sin_addr.s_addr = inet->inet_daddr;
73349+ sin.sin_port = inet->inet_dport;
73350+
73351+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73352+ }
73353+}
73354+
73355+int
73356+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73357+{
73358+ struct sockaddr_in sin;
73359+
73360+ if (unlikely(skb->len < sizeof (struct udphdr)))
73361+ return 0; // skip this packet
73362+
73363+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73364+ sin.sin_port = udp_hdr(skb)->source;
73365+
73366+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73367+}
73368diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73369new file mode 100644
73370index 0000000..25f54ef
73371--- /dev/null
73372+++ b/grsecurity/gracl_learn.c
73373@@ -0,0 +1,207 @@
73374+#include <linux/kernel.h>
73375+#include <linux/mm.h>
73376+#include <linux/sched.h>
73377+#include <linux/poll.h>
73378+#include <linux/string.h>
73379+#include <linux/file.h>
73380+#include <linux/types.h>
73381+#include <linux/vmalloc.h>
73382+#include <linux/grinternal.h>
73383+
73384+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73385+ size_t count, loff_t *ppos);
73386+extern int gr_acl_is_enabled(void);
73387+
73388+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73389+static int gr_learn_attached;
73390+
73391+/* use a 512k buffer */
73392+#define LEARN_BUFFER_SIZE (512 * 1024)
73393+
73394+static DEFINE_SPINLOCK(gr_learn_lock);
73395+static DEFINE_MUTEX(gr_learn_user_mutex);
73396+
73397+/* we need to maintain two buffers, so that the kernel context of grlearn
73398+   uses a mutex around the userspace copying, and the other kernel contexts
73399+ use a spinlock when copying into the buffer, since they cannot sleep
73400+*/
73401+static char *learn_buffer;
73402+static char *learn_buffer_user;
73403+static int learn_buffer_len;
73404+static int learn_buffer_user_len;
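+
+/* Editor's note (illustrative): the resulting lock order is
+ * gr_learn_user_mutex -> gr_learn_lock. read_learn() drains learn_buffer
+ * into learn_buffer_user with both held, drops the spinlock, then does the
+ * (possibly sleeping) copy_to_user() under the mutex alone, so
+ * gr_add_learn_entry(), which takes only the spinlock and may run in atomic
+ * context, is never blocked behind a userspace copy.
+ */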
73405+
73406+static ssize_t
73407+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73408+{
73409+ DECLARE_WAITQUEUE(wait, current);
73410+ ssize_t retval = 0;
73411+
73412+ add_wait_queue(&learn_wait, &wait);
73413+ set_current_state(TASK_INTERRUPTIBLE);
73414+ do {
73415+ mutex_lock(&gr_learn_user_mutex);
73416+ spin_lock(&gr_learn_lock);
73417+ if (learn_buffer_len)
73418+ break;
73419+ spin_unlock(&gr_learn_lock);
73420+ mutex_unlock(&gr_learn_user_mutex);
73421+ if (file->f_flags & O_NONBLOCK) {
73422+ retval = -EAGAIN;
73423+ goto out;
73424+ }
73425+ if (signal_pending(current)) {
73426+ retval = -ERESTARTSYS;
73427+ goto out;
73428+ }
73429+
73430+ schedule();
73431+ } while (1);
73432+
73433+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73434+ learn_buffer_user_len = learn_buffer_len;
73435+ retval = learn_buffer_len;
73436+ learn_buffer_len = 0;
73437+
73438+ spin_unlock(&gr_learn_lock);
73439+
73440+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73441+ retval = -EFAULT;
73442+
73443+ mutex_unlock(&gr_learn_user_mutex);
73444+out:
73445+ set_current_state(TASK_RUNNING);
73446+ remove_wait_queue(&learn_wait, &wait);
73447+ return retval;
73448+}
73449+
73450+static unsigned int
73451+poll_learn(struct file * file, poll_table * wait)
73452+{
73453+ poll_wait(file, &learn_wait, wait);
73454+
73455+ if (learn_buffer_len)
73456+ return (POLLIN | POLLRDNORM);
73457+
73458+ return 0;
73459+}
73460+
73461+void
73462+gr_clear_learn_entries(void)
73463+{
73464+ char *tmp;
73465+
73466+ mutex_lock(&gr_learn_user_mutex);
73467+ spin_lock(&gr_learn_lock);
73468+ tmp = learn_buffer;
73469+ learn_buffer = NULL;
73470+ spin_unlock(&gr_learn_lock);
73471+ if (tmp)
73472+ vfree(tmp);
73473+ if (learn_buffer_user != NULL) {
73474+ vfree(learn_buffer_user);
73475+ learn_buffer_user = NULL;
73476+ }
73477+ learn_buffer_len = 0;
73478+ mutex_unlock(&gr_learn_user_mutex);
73479+
73480+ return;
73481+}
73482+
73483+void
73484+gr_add_learn_entry(const char *fmt, ...)
73485+{
73486+ va_list args;
73487+ unsigned int len;
73488+
73489+ if (!gr_learn_attached)
73490+ return;
73491+
73492+ spin_lock(&gr_learn_lock);
73493+
73494+ /* leave a gap at the end so we know when it's "full" but don't have to
73495+ compute the exact length of the string we're trying to append
73496+ */
73497+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73498+ spin_unlock(&gr_learn_lock);
73499+ wake_up_interruptible(&learn_wait);
73500+ return;
73501+ }
73502+ if (learn_buffer == NULL) {
73503+ spin_unlock(&gr_learn_lock);
73504+ return;
73505+ }
73506+
73507+ va_start(args, fmt);
73508+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73509+ va_end(args);
73510+
73511+ learn_buffer_len += len + 1;
73512+
73513+ spin_unlock(&gr_learn_lock);
73514+ wake_up_interruptible(&learn_wait);
73515+
73516+ return;
73517+}
73518+
73519+static int
73520+open_learn(struct inode *inode, struct file *file)
73521+{
73522+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73523+ return -EBUSY;
73524+ if (file->f_mode & FMODE_READ) {
73525+ int retval = 0;
73526+ mutex_lock(&gr_learn_user_mutex);
73527+ if (learn_buffer == NULL)
73528+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73529+ if (learn_buffer_user == NULL)
73530+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73531+ if (learn_buffer == NULL) {
73532+ retval = -ENOMEM;
73533+ goto out_error;
73534+ }
73535+ if (learn_buffer_user == NULL) {
73536+ retval = -ENOMEM;
73537+ goto out_error;
73538+ }
73539+ learn_buffer_len = 0;
73540+ learn_buffer_user_len = 0;
73541+ gr_learn_attached = 1;
73542+out_error:
73543+ mutex_unlock(&gr_learn_user_mutex);
73544+ return retval;
73545+ }
73546+ return 0;
73547+}
73548+
73549+static int
73550+close_learn(struct inode *inode, struct file *file)
73551+{
73552+ if (file->f_mode & FMODE_READ) {
73553+ char *tmp = NULL;
73554+ mutex_lock(&gr_learn_user_mutex);
73555+ spin_lock(&gr_learn_lock);
73556+ tmp = learn_buffer;
73557+ learn_buffer = NULL;
73558+ spin_unlock(&gr_learn_lock);
73559+ if (tmp)
73560+ vfree(tmp);
73561+ if (learn_buffer_user != NULL) {
73562+ vfree(learn_buffer_user);
73563+ learn_buffer_user = NULL;
73564+ }
73565+ learn_buffer_len = 0;
73566+ learn_buffer_user_len = 0;
73567+ gr_learn_attached = 0;
73568+ mutex_unlock(&gr_learn_user_mutex);
73569+ }
73570+
73571+ return 0;
73572+}
73573+
73574+const struct file_operations grsec_fops = {
73575+ .read = read_learn,
73576+ .write = write_grsec_handler,
73577+ .open = open_learn,
73578+ .release = close_learn,
73579+ .poll = poll_learn,
73580+};
73581diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73582new file mode 100644
73583index 0000000..fd26052
73584--- /dev/null
73585+++ b/grsecurity/gracl_policy.c
73586@@ -0,0 +1,1781 @@
73587+#include <linux/kernel.h>
73588+#include <linux/module.h>
73589+#include <linux/sched.h>
73590+#include <linux/mm.h>
73591+#include <linux/file.h>
73592+#include <linux/fs.h>
73593+#include <linux/namei.h>
73594+#include <linux/mount.h>
73595+#include <linux/tty.h>
73596+#include <linux/proc_fs.h>
73597+#include <linux/lglock.h>
73598+#include <linux/slab.h>
73599+#include <linux/vmalloc.h>
73600+#include <linux/types.h>
73601+#include <linux/sysctl.h>
73602+#include <linux/netdevice.h>
73603+#include <linux/ptrace.h>
73604+#include <linux/gracl.h>
73605+#include <linux/gralloc.h>
73606+#include <linux/security.h>
73607+#include <linux/grinternal.h>
73608+#include <linux/pid_namespace.h>
73609+#include <linux/stop_machine.h>
73610+#include <linux/fdtable.h>
73611+#include <linux/percpu.h>
73612+#include <linux/lglock.h>
73613+#include <linux/hugetlb.h>
73614+#include <linux/posix-timers.h>
73615+#include "../fs/mount.h"
73616+
73617+#include <asm/uaccess.h>
73618+#include <asm/errno.h>
73619+#include <asm/mman.h>
73620+
73621+extern struct gr_policy_state *polstate;
73622+
73623+#define FOR_EACH_ROLE_START(role) \
73624+ role = polstate->role_list; \
73625+ while (role) {
73626+
73627+#define FOR_EACH_ROLE_END(role) \
73628+ role = role->prev; \
73629+ }
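+
+/* Editor's example (illustrative): the macro pair expands to a backwards
+ * list walk over the loaded roles:
+ *
+ *   struct acl_role_label *r;
+ *   FOR_EACH_ROLE_START(r)
+ *       visit(r);               // hypothetical per-role action
+ *   FOR_EACH_ROLE_END(r)
+ *
+ * i.e. r = polstate->role_list; while (r) { visit(r); r = r->prev; }
+ */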
73630+
73631+struct path gr_real_root;
73632+
73633+extern struct gr_alloc_state *current_alloc_state;
73634+
73635+u16 acl_sp_role_value;
73636+
73637+static DEFINE_MUTEX(gr_dev_mutex);
73638+
73639+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73640+extern void gr_clear_learn_entries(void);
73641+
73642+struct gr_arg *gr_usermode __read_only;
73643+unsigned char *gr_system_salt __read_only;
73644+unsigned char *gr_system_sum __read_only;
73645+
73646+static unsigned int gr_auth_attempts = 0;
73647+static unsigned long gr_auth_expires = 0UL;
73648+
73649+struct acl_object_label *fakefs_obj_rw;
73650+struct acl_object_label *fakefs_obj_rwx;
73651+
73652+extern int gr_init_uidset(void);
73653+extern void gr_free_uidset(void);
73654+extern void gr_remove_uid(uid_t uid);
73655+extern int gr_find_uid(uid_t uid);
73656+
73657+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73658+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73659+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73660+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73661+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73662+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73663+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73664+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73665+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73666+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73667+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73668+extern void assign_special_role(const char *rolename);
73669+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73670+extern int gr_rbac_disable(void *unused);
73671+extern void gr_enable_rbac_system(void);
73672+
73673+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73674+{
73675+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73676+ return -EFAULT;
73677+
73678+ return 0;
73679+}
73680+
73681+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73682+{
73683+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73684+ return -EFAULT;
73685+
73686+ return 0;
73687+}
73688+
73689+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73690+{
73691+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73692+ return -EFAULT;
73693+
73694+ return 0;
73695+}
73696+
73697+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73698+{
73699+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73700+ return -EFAULT;
73701+
73702+ return 0;
73703+}
73704+
73705+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73706+{
73707+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73708+ return -EFAULT;
73709+
73710+ return 0;
73711+}
73712+
73713+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73714+{
73715+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73716+ return -EFAULT;
73717+
73718+ return 0;
73719+}
73720+
73721+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73722+{
73723+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73724+ return -EFAULT;
73725+
73726+ return 0;
73727+}
73728+
73729+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73730+{
73731+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73732+ return -EFAULT;
73733+
73734+ return 0;
73735+}
73736+
73737+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73738+{
73739+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73740+ return -EFAULT;
73741+
73742+ return 0;
73743+}
73744+
73745+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73746+{
73747+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73748+ return -EFAULT;
73749+
73750+ if ((uwrap->version != GRSECURITY_VERSION) ||
73751+ (uwrap->size != sizeof(struct gr_arg)))
73752+ return -EINVAL;
73753+
73754+ return 0;
73755+}
73756+
73757+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73758+{
73759+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73760+ return -EFAULT;
73761+
73762+ return 0;
73763+}
73764+
73765+static size_t get_gr_arg_wrapper_size_normal(void)
73766+{
73767+ return sizeof(struct gr_arg_wrapper);
73768+}
73769+
73770+#ifdef CONFIG_COMPAT
73771+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73772+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73773+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73774+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73775+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73776+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73777+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73778+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73779+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73780+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73781+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73782+extern size_t get_gr_arg_wrapper_size_compat(void);
73783+
73784+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73785+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73786+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73787+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73788+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73789+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73790+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73791+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73792+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73793+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73794+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73795+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73796+
73797+#else
73798+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73799+#define copy_gr_arg copy_gr_arg_normal
73800+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73801+#define copy_acl_object_label copy_acl_object_label_normal
73802+#define copy_acl_subject_label copy_acl_subject_label_normal
73803+#define copy_acl_role_label copy_acl_role_label_normal
73804+#define copy_acl_ip_label copy_acl_ip_label_normal
73805+#define copy_pointer_from_array copy_pointer_from_array_normal
73806+#define copy_sprole_pw copy_sprole_pw_normal
73807+#define copy_role_transition copy_role_transition_normal
73808+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73809+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73810+#endif
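+/*
+ * Dispatch pattern for the helpers above: with CONFIG_COMPAT each copy
+ * routine is reached through a __read_only function pointer that
+ * write_grsec_handler() retargets (inside pax_open_kernel()/
+ * pax_close_kernel()) to the *_compat or *_normal variant depending on
+ * is_compat_task(), so a 32-bit userland layout of the policy structures
+ * can be translated on a 64-bit kernel.  Without CONFIG_COMPAT the
+ * #defines collapse the names straight onto the *_normal functions with
+ * no indirection.  In outline:
+ *
+ *	if (is_compat_task())
+ *		copy_gr_arg = &copy_gr_arg_compat;
+ *	else
+ *		copy_gr_arg = &copy_gr_arg_normal;
+ */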
73811+
73812+static struct acl_subject_label *
73813+lookup_subject_map(const struct acl_subject_label *userp)
73814+{
73815+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73816+ struct subject_map *match;
73817+
73818+ match = polstate->subj_map_set.s_hash[index];
73819+
73820+ while (match && match->user != userp)
73821+ match = match->next;
73822+
73823+ if (match != NULL)
73824+ return match->kernel;
73825+ else
73826+ return NULL;
73827+}
73828+
73829+static void
73830+insert_subj_map_entry(struct subject_map *subjmap)
73831+{
73832+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73833+ struct subject_map **curr;
73834+
73835+ subjmap->prev = NULL;
73836+
73837+ curr = &polstate->subj_map_set.s_hash[index];
73838+ if (*curr != NULL)
73839+ (*curr)->prev = subjmap;
73840+
73841+ subjmap->next = *curr;
73842+ *curr = subjmap;
73843+
73844+ return;
73845+}
73846+
73847+static void
73848+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73849+{
73850+ unsigned int index =
73851+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73852+ struct acl_role_label **curr;
73853+ struct acl_role_label *tmp, *tmp2;
73854+
73855+ curr = &polstate->acl_role_set.r_hash[index];
73856+
73857+ /* simple case, slot is empty, just set it to our role */
73858+ if (*curr == NULL) {
73859+ *curr = role;
73860+ } else {
73861+ /* example:
73862+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73863+ 2 -> 3
73864+ */
73865+ /* first check to see if we can already be reached via this slot */
73866+ tmp = *curr;
73867+ while (tmp && tmp != role)
73868+ tmp = tmp->next;
73869+ if (tmp == role) {
73870+ /* we don't need to add ourselves to this slot's chain */
73871+ return;
73872+ }
73873+ /* we need to add ourselves to this chain, two cases */
73874+ if (role->next == NULL) {
73875+ /* simple case, append the current chain to our role */
73876+ role->next = *curr;
73877+ *curr = role;
73878+ } else {
73879+ /* 1 -> 2 -> 3 -> 4
73880+ 2 -> 3 -> 4
73881+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73882+ */
73883+ /* trickier case: walk our role's chain until we find
73884+ the role for the start of the current slot's chain */
73885+ tmp = role;
73886+ tmp2 = *curr;
73887+ while (tmp->next && tmp->next != tmp2)
73888+ tmp = tmp->next;
73889+ if (tmp->next == tmp2) {
73890+ /* from example above, we found 3, so just
73891+ replace this slot's chain with ours */
73892+ *curr = role;
73893+ } else {
73894+ /* we didn't find a subset of our role's chain
73895+ in the current slot's chain, so append their
73896+ chain to ours, and set us as the first role in
73897+ the slot's chain
73898+
73899+ we could fold this case with the case above,
73900+ but making it explicit for clarity
73901+ */
73902+ tmp->next = tmp2;
73903+ *curr = role;
73904+ }
73905+ }
73906+ }
73907+
73908+ return;
73909+}
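+
+/*
+ * Worked example of the chain sharing above: if slot S already holds the
+ * chain 3 -> 4 and we insert a role whose own chain is 1 -> 2 -> 3 -> 4,
+ * the walk finds S's head (3) inside our chain, so S is simply repointed
+ * at 1 and the tail 3 -> 4 is shared rather than duplicated.  If the two
+ * chains are disjoint, S's old chain is appended to our tail and we
+ * become the new head.  Either way a role appears at most once per chain.
+ */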
73910+
73911+static void
73912+insert_acl_role_label(struct acl_role_label *role)
73913+{
73914+ int i;
73915+
73916+ if (polstate->role_list == NULL) {
73917+ polstate->role_list = role;
73918+ role->prev = NULL;
73919+ } else {
73920+ role->prev = polstate->role_list;
73921+ polstate->role_list = role;
73922+ }
73923+
73924+ /* used for hash chains */
73925+ role->next = NULL;
73926+
73927+ if (role->roletype & GR_ROLE_DOMAIN) {
73928+ for (i = 0; i < role->domain_child_num; i++)
73929+ __insert_acl_role_label(role, role->domain_children[i]);
73930+ } else
73931+ __insert_acl_role_label(role, role->uidgid);
73932+}
73933+
73934+static int
73935+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
73936+{
73937+ struct name_entry **curr, *nentry;
73938+ struct inodev_entry *ientry;
73939+ unsigned int len = strlen(name);
73940+ unsigned int key = full_name_hash(name, len);
73941+ unsigned int index = key % polstate->name_set.n_size;
73942+
73943+ curr = &polstate->name_set.n_hash[index];
73944+
73945+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
73946+ curr = &((*curr)->next);
73947+
73948+ if (*curr != NULL)
73949+ return 1;
73950+
73951+ nentry = acl_alloc(sizeof (struct name_entry));
73952+ if (nentry == NULL)
73953+ return 0;
73954+ ientry = acl_alloc(sizeof (struct inodev_entry));
73955+ if (ientry == NULL)
73956+ return 0;
73957+ ientry->nentry = nentry;
73958+
73959+ nentry->key = key;
73960+ nentry->name = name;
73961+ nentry->inode = inode;
73962+ nentry->device = device;
73963+ nentry->len = len;
73964+ nentry->deleted = deleted;
73965+
73966+ nentry->prev = NULL;
73967+ curr = &polstate->name_set.n_hash[index];
73968+ if (*curr != NULL)
73969+ (*curr)->prev = nentry;
73970+ nentry->next = *curr;
73971+ *curr = nentry;
73972+
73973+ /* insert us into the table searchable by inode/dev */
73974+ __insert_inodev_entry(polstate, ientry);
73975+
73976+ return 1;
73977+}
73978+
73979+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
73980+
73981+static void *
73982+create_table(__u32 * len, int elementsize)
73983+{
73984+ unsigned int table_sizes[] = {
73985+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
73986+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
73987+ 4194301, 8388593, 16777213, 33554393, 67108859
73988+ };
73989+ void *newtable = NULL;
73990+ unsigned int pwr = 0;
73991+
73992+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
73993+ table_sizes[pwr] <= *len)
73994+ pwr++;
73995+
73996+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
73997+ return newtable;
73998+
73999+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74000+ newtable =
74001+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74002+ else
74003+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74004+
74005+ *len = table_sizes[pwr];
74006+
74007+ return newtable;
74008+}
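+
+/*
+ * Sizing example for create_table(): a request of *len = 1000 selects
+ * 1021, the smallest prime in table_sizes[] strictly greater than the
+ * request, keeping the load factor (lambda) just under 1.  The table is
+ * kmalloc'd when it fits within a page and vmalloc'd otherwise, and *len
+ * is updated to the size actually allocated.  Requests at or beyond the
+ * largest prime (or whose byte size would overflow) return NULL.
+ */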
74009+
74010+static int
74011+init_variables(const struct gr_arg *arg, bool reload)
74012+{
74013+ struct task_struct *reaper = init_pid_ns.child_reaper;
74014+ unsigned int stacksize;
74015+
74016+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74017+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74018+ polstate->name_set.n_size = arg->role_db.num_objects;
74019+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74020+
74021+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74022+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74023+ return 1;
74024+
74025+ if (!reload) {
74026+ if (!gr_init_uidset())
74027+ return 1;
74028+ }
74029+
74030+ /* set up the stack that holds allocation info */
74031+
74032+ stacksize = arg->role_db.num_pointers + 5;
74033+
74034+ if (!acl_alloc_stack_init(stacksize))
74035+ return 1;
74036+
74037+ if (!reload) {
74038+ /* grab reference for the real root dentry and vfsmount */
74039+ get_fs_root(reaper->fs, &gr_real_root);
74040+
74041+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74042+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74043+#endif
74044+
74045+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74046+ if (fakefs_obj_rw == NULL)
74047+ return 1;
74048+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74049+
74050+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74051+ if (fakefs_obj_rwx == NULL)
74052+ return 1;
74053+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74054+ }
74055+
74056+ polstate->subj_map_set.s_hash =
74057+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74058+ polstate->acl_role_set.r_hash =
74059+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74060+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74061+ polstate->inodev_set.i_hash =
74062+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74063+
74064+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74065+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74066+ return 1;
74067+
74068+ memset(polstate->subj_map_set.s_hash, 0,
74069+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74070+ memset(polstate->acl_role_set.r_hash, 0,
74071+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74072+ memset(polstate->name_set.n_hash, 0,
74073+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74074+ memset(polstate->inodev_set.i_hash, 0,
74075+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74076+
74077+ return 0;
74078+}
74079+
74080+/* free information not needed after startup
74081+ currently contains user->kernel pointer mappings for subjects
74082+*/
74083+
74084+static void
74085+free_init_variables(void)
74086+{
74087+ __u32 i;
74088+
74089+ if (polstate->subj_map_set.s_hash) {
74090+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74091+ if (polstate->subj_map_set.s_hash[i]) {
74092+ kfree(polstate->subj_map_set.s_hash[i]);
74093+ polstate->subj_map_set.s_hash[i] = NULL;
74094+ }
74095+ }
74096+
74097+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74098+ PAGE_SIZE)
74099+ kfree(polstate->subj_map_set.s_hash);
74100+ else
74101+ vfree(polstate->subj_map_set.s_hash);
74102+ }
74103+
74104+ return;
74105+}
74106+
74107+static void
74108+free_variables(bool reload)
74109+{
74110+ struct acl_subject_label *s;
74111+ struct acl_role_label *r;
74112+ struct task_struct *task, *task2;
74113+ unsigned int x;
74114+
74115+ if (!reload) {
74116+ gr_clear_learn_entries();
74117+
74118+ read_lock(&tasklist_lock);
74119+ do_each_thread(task2, task) {
74120+ task->acl_sp_role = 0;
74121+ task->acl_role_id = 0;
74122+ task->inherited = 0;
74123+ task->acl = NULL;
74124+ task->role = NULL;
74125+ } while_each_thread(task2, task);
74126+ read_unlock(&tasklist_lock);
74127+
74128+ kfree(fakefs_obj_rw);
74129+ fakefs_obj_rw = NULL;
74130+ kfree(fakefs_obj_rwx);
74131+ fakefs_obj_rwx = NULL;
74132+
74133+ /* release the reference to the real root dentry and vfsmount */
74134+ path_put(&gr_real_root);
74135+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74136+ }
74137+
74138+ /* free all object hash tables */
74139+
74140+ FOR_EACH_ROLE_START(r)
74141+ if (r->subj_hash == NULL)
74142+ goto next_role;
74143+ FOR_EACH_SUBJECT_START(r, s, x)
74144+ if (s->obj_hash == NULL)
74145+ break;
74146+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74147+ kfree(s->obj_hash);
74148+ else
74149+ vfree(s->obj_hash);
74150+ FOR_EACH_SUBJECT_END(s, x)
74151+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74152+ if (s->obj_hash == NULL)
74153+ break;
74154+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74155+ kfree(s->obj_hash);
74156+ else
74157+ vfree(s->obj_hash);
74158+ FOR_EACH_NESTED_SUBJECT_END(s)
74159+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74160+ kfree(r->subj_hash);
74161+ else
74162+ vfree(r->subj_hash);
74163+ r->subj_hash = NULL;
74164+next_role:
74165+ FOR_EACH_ROLE_END(r)
74166+
74167+ acl_free_all();
74168+
74169+ if (polstate->acl_role_set.r_hash) {
74170+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74171+ PAGE_SIZE)
74172+ kfree(polstate->acl_role_set.r_hash);
74173+ else
74174+ vfree(polstate->acl_role_set.r_hash);
74175+ }
74176+ if (polstate->name_set.n_hash) {
74177+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74178+ PAGE_SIZE)
74179+ kfree(polstate->name_set.n_hash);
74180+ else
74181+ vfree(polstate->name_set.n_hash);
74182+ }
74183+
74184+ if (polstate->inodev_set.i_hash) {
74185+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74186+ PAGE_SIZE)
74187+ kfree(polstate->inodev_set.i_hash);
74188+ else
74189+ vfree(polstate->inodev_set.i_hash);
74190+ }
74191+
74192+ if (!reload)
74193+ gr_free_uidset();
74194+
74195+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74196+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74197+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74198+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74199+
74200+ polstate->default_role = NULL;
74201+ polstate->kernel_role = NULL;
74202+ polstate->role_list = NULL;
74203+
74204+ return;
74205+}
74206+
74207+static struct acl_subject_label *
74208+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74209+
74210+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74211+{
74212+ unsigned int len = strnlen_user(*name, maxlen);
74213+ char *tmp;
74214+
74215+ if (!len || len >= maxlen)
74216+ return -EINVAL;
74217+
74218+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74219+ return -ENOMEM;
74220+
74221+ if (copy_from_user(tmp, *name, len))
74222+ return -EFAULT;
74223+
74224+ tmp[len-1] = '\0';
74225+ *name = tmp;
74226+
74227+ return 0;
74228+}
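+
+/*
+ * Length handling above: strnlen_user() returns the string length
+ * *including* the trailing NUL, 0 on a fault, and a value larger than
+ * maxlen when no terminator is found within the limit, so the
+ * (!len || len >= maxlen) test rejects faulting and oversized strings in
+ * one shot.  E.g. for the user string "/bin/sh" with maxlen = PATH_MAX,
+ * len is 8, eight bytes are copied, and tmp[7] is rewritten with '\0' as
+ * a defensive terminator in case userland raced and rewrote the buffer.
+ */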
74229+
74230+static int
74231+copy_user_glob(struct acl_object_label *obj)
74232+{
74233+ struct acl_object_label *g_tmp, **guser;
74234+ int error;
74235+
74236+ if (obj->globbed == NULL)
74237+ return 0;
74238+
74239+ guser = &obj->globbed;
74240+ while (*guser) {
74241+ g_tmp = (struct acl_object_label *)
74242+ acl_alloc(sizeof (struct acl_object_label));
74243+ if (g_tmp == NULL)
74244+ return -ENOMEM;
74245+
74246+ if (copy_acl_object_label(g_tmp, *guser))
74247+ return -EFAULT;
74248+
74249+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74250+ if (error)
74251+ return error;
74252+
74253+ *guser = g_tmp;
74254+ guser = &(g_tmp->next);
74255+ }
74256+
74257+ return 0;
74258+}
74259+
74260+static int
74261+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74262+ struct acl_role_label *role)
74263+{
74264+ struct acl_object_label *o_tmp;
74265+ int ret;
74266+
74267+ while (userp) {
74268+ if ((o_tmp = (struct acl_object_label *)
74269+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74270+ return -ENOMEM;
74271+
74272+ if (copy_acl_object_label(o_tmp, userp))
74273+ return -EFAULT;
74274+
74275+ userp = o_tmp->prev;
74276+
74277+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74278+ if (ret)
74279+ return ret;
74280+
74281+ insert_acl_obj_label(o_tmp, subj);
74282+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74283+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74284+ return -ENOMEM;
74285+
74286+ ret = copy_user_glob(o_tmp);
74287+ if (ret)
74288+ return ret;
74289+
74290+ if (o_tmp->nested) {
74291+ int already_copied;
74292+
74293+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74294+ if (IS_ERR(o_tmp->nested))
74295+ return PTR_ERR(o_tmp->nested);
74296+
74297+ /* insert into nested subject list if we haven't copied this one yet
74298+ to prevent duplicate entries */
74299+ if (!already_copied) {
74300+ o_tmp->nested->next = role->hash->first;
74301+ role->hash->first = o_tmp->nested;
74302+ }
74303+ }
74304+ }
74305+
74306+ return 0;
74307+}
74308+
74309+static __u32
74310+count_user_subjs(struct acl_subject_label *userp)
74311+{
74312+ struct acl_subject_label s_tmp;
74313+ __u32 num = 0;
74314+
74315+ while (userp) {
74316+ if (copy_acl_subject_label(&s_tmp, userp))
74317+ break;
74318+
74319+ userp = s_tmp.prev;
74320+ }
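+
+	/*
+	 * Note: num is never incremented in this loop, so the function
+	 * always returns 0 and create_table() then falls back to its
+	 * smallest table size for the role's subject hash; all subjects
+	 * are still inserted, the hash chains are simply longer.
+	 */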
74321+
74322+ return num;
74323+}
74324+
74325+static int
74326+copy_user_allowedips(struct acl_role_label *rolep)
74327+{
74328+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74329+
74330+ ruserip = rolep->allowed_ips;
74331+
74332+ while (ruserip) {
74333+ rlast = rtmp;
74334+
74335+ if ((rtmp = (struct role_allowed_ip *)
74336+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74337+ return -ENOMEM;
74338+
74339+ if (copy_role_allowed_ip(rtmp, ruserip))
74340+ return -EFAULT;
74341+
74342+ ruserip = rtmp->prev;
74343+
74344+ if (!rlast) {
74345+ rtmp->prev = NULL;
74346+ rolep->allowed_ips = rtmp;
74347+ } else {
74348+ rlast->next = rtmp;
74349+ rtmp->prev = rlast;
74350+ }
74351+
74352+ if (!ruserip)
74353+ rtmp->next = NULL;
74354+ }
74355+
74356+ return 0;
74357+}
74358+
74359+static int
74360+copy_user_transitions(struct acl_role_label *rolep)
74361+{
74362+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74363+ int error;
74364+
74365+ rusertp = rolep->transitions;
74366+
74367+ while (rusertp) {
74368+ rlast = rtmp;
74369+
74370+ if ((rtmp = (struct role_transition *)
74371+ acl_alloc(sizeof (struct role_transition))) == NULL)
74372+ return -ENOMEM;
74373+
74374+ if (copy_role_transition(rtmp, rusertp))
74375+ return -EFAULT;
74376+
74377+ rusertp = rtmp->prev;
74378+
74379+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74380+ if (error)
74381+ return error;
74382+
74383+ if (!rlast) {
74384+ rtmp->prev = NULL;
74385+ rolep->transitions = rtmp;
74386+ } else {
74387+ rlast->next = rtmp;
74388+ rtmp->prev = rlast;
74389+ }
74390+
74391+ if (!rusertp)
74392+ rtmp->next = NULL;
74393+ }
74394+
74395+ return 0;
74396+}
74397+
74398+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74399+{
74400+ struct acl_object_label o_tmp;
74401+ __u32 num = 0;
74402+
74403+ while (userp) {
74404+ if (copy_acl_object_label(&o_tmp, userp))
74405+ break;
74406+
74407+ userp = o_tmp.prev;
74408+ num++;
74409+ }
74410+
74411+ return num;
74412+}
74413+
74414+static struct acl_subject_label *
74415+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74416+{
74417+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74418+ __u32 num_objs;
74419+ struct acl_ip_label **i_tmp, *i_utmp2;
74420+ struct gr_hash_struct ghash;
74421+ struct subject_map *subjmap;
74422+ unsigned int i_num;
74423+ int err;
74424+
74425+ if (already_copied != NULL)
74426+ *already_copied = 0;
74427+
74428+ s_tmp = lookup_subject_map(userp);
74429+
74430+ /* we've already copied this subject into the kernel, just return
74431+ the reference to it, and don't copy it over again
74432+ */
74433+ if (s_tmp) {
74434+ if (already_copied != NULL)
74435+ *already_copied = 1;
74436+		return s_tmp;
74437+ }
74438+
74439+ if ((s_tmp = (struct acl_subject_label *)
74440+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74441+ return ERR_PTR(-ENOMEM);
74442+
74443+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74444+ if (subjmap == NULL)
74445+ return ERR_PTR(-ENOMEM);
74446+
74447+ subjmap->user = userp;
74448+ subjmap->kernel = s_tmp;
74449+ insert_subj_map_entry(subjmap);
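+
+	/*
+	 * The user->kernel mapping is inserted before the subject body is
+	 * copied: parent_subject and nested-subject references back to this
+	 * subject then resolve through lookup_subject_map() above instead
+	 * of recursing into do_copy_user_subj() forever.
+	 */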
74450+
74451+ if (copy_acl_subject_label(s_tmp, userp))
74452+ return ERR_PTR(-EFAULT);
74453+
74454+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74455+ if (err)
74456+ return ERR_PTR(err);
74457+
74458+ if (!strcmp(s_tmp->filename, "/"))
74459+ role->root_label = s_tmp;
74460+
74461+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74462+ return ERR_PTR(-EFAULT);
74463+
74464+ /* copy user and group transition tables */
74465+
74466+ if (s_tmp->user_trans_num) {
74467+ uid_t *uidlist;
74468+
74469+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74470+ if (uidlist == NULL)
74471+ return ERR_PTR(-ENOMEM);
74472+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74473+ return ERR_PTR(-EFAULT);
74474+
74475+ s_tmp->user_transitions = uidlist;
74476+ }
74477+
74478+ if (s_tmp->group_trans_num) {
74479+ gid_t *gidlist;
74480+
74481+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74482+ if (gidlist == NULL)
74483+ return ERR_PTR(-ENOMEM);
74484+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74485+ return ERR_PTR(-EFAULT);
74486+
74487+ s_tmp->group_transitions = gidlist;
74488+ }
74489+
74490+ /* set up object hash table */
74491+ num_objs = count_user_objs(ghash.first);
74492+
74493+ s_tmp->obj_hash_size = num_objs;
74494+ s_tmp->obj_hash =
74495+ (struct acl_object_label **)
74496+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74497+
74498+ if (!s_tmp->obj_hash)
74499+ return ERR_PTR(-ENOMEM);
74500+
74501+ memset(s_tmp->obj_hash, 0,
74502+ s_tmp->obj_hash_size *
74503+ sizeof (struct acl_object_label *));
74504+
74505+ /* add in objects */
74506+ err = copy_user_objs(ghash.first, s_tmp, role);
74507+
74508+ if (err)
74509+ return ERR_PTR(err);
74510+
74511+ /* set pointer for parent subject */
74512+ if (s_tmp->parent_subject) {
74513+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74514+
74515+ if (IS_ERR(s_tmp2))
74516+ return s_tmp2;
74517+
74518+ s_tmp->parent_subject = s_tmp2;
74519+ }
74520+
74521+ /* add in ip acls */
74522+
74523+ if (!s_tmp->ip_num) {
74524+ s_tmp->ips = NULL;
74525+ goto insert;
74526+ }
74527+
74528+ i_tmp =
74529+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74530+ sizeof (struct acl_ip_label *));
74531+
74532+ if (!i_tmp)
74533+ return ERR_PTR(-ENOMEM);
74534+
74535+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74536+ *(i_tmp + i_num) =
74537+ (struct acl_ip_label *)
74538+ acl_alloc(sizeof (struct acl_ip_label));
74539+ if (!*(i_tmp + i_num))
74540+ return ERR_PTR(-ENOMEM);
74541+
74542+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74543+ return ERR_PTR(-EFAULT);
74544+
74545+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74546+ return ERR_PTR(-EFAULT);
74547+
74548+ if ((*(i_tmp + i_num))->iface == NULL)
74549+ continue;
74550+
74551+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74552+ if (err)
74553+ return ERR_PTR(err);
74554+ }
74555+
74556+ s_tmp->ips = i_tmp;
74557+
74558+insert:
74559+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74560+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74561+ return ERR_PTR(-ENOMEM);
74562+
74563+ return s_tmp;
74564+}
74565+
74566+static int
74567+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74568+{
74569+ struct acl_subject_label s_pre;
74570+ struct acl_subject_label * ret;
74571+ int err;
74572+
74573+ while (userp) {
74574+ if (copy_acl_subject_label(&s_pre, userp))
74575+ return -EFAULT;
74576+
74577+ ret = do_copy_user_subj(userp, role, NULL);
74578+
74579+ err = PTR_ERR(ret);
74580+ if (IS_ERR(ret))
74581+ return err;
74582+
74583+ insert_acl_subj_label(ret, role);
74584+
74585+ userp = s_pre.prev;
74586+ }
74587+
74588+ return 0;
74589+}
74590+
74591+static int
74592+copy_user_acl(struct gr_arg *arg)
74593+{
74594+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74595+ struct acl_subject_label *subj_list;
74596+ struct sprole_pw *sptmp;
74597+ struct gr_hash_struct *ghash;
74598+ uid_t *domainlist;
74599+ unsigned int r_num;
74600+ int err = 0;
74601+ __u16 i;
74602+ __u32 num_subjs;
74603+
74604+ /* we need a default and kernel role */
74605+ if (arg->role_db.num_roles < 2)
74606+ return -EINVAL;
74607+
74608+ /* copy special role authentication info from userspace */
74609+
74610+ polstate->num_sprole_pws = arg->num_sprole_pws;
74611+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74612+
74613+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74614+ return -ENOMEM;
74615+
74616+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74617+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74618+ if (!sptmp)
74619+ return -ENOMEM;
74620+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74621+ return -EFAULT;
74622+
74623+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74624+ if (err)
74625+ return err;
74626+
74627+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74628+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74629+#endif
74630+
74631+ polstate->acl_special_roles[i] = sptmp;
74632+ }
74633+
74634+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74635+
74636+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74637+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74638+
74639+ if (!r_tmp)
74640+ return -ENOMEM;
74641+
74642+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74643+ return -EFAULT;
74644+
74645+ if (copy_acl_role_label(r_tmp, r_utmp2))
74646+ return -EFAULT;
74647+
74648+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74649+ if (err)
74650+ return err;
74651+
74652+ if (!strcmp(r_tmp->rolename, "default")
74653+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74654+ polstate->default_role = r_tmp;
74655+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74656+ polstate->kernel_role = r_tmp;
74657+ }
74658+
74659+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74660+ return -ENOMEM;
74661+
74662+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74663+ return -EFAULT;
74664+
74665+ r_tmp->hash = ghash;
74666+
74667+ num_subjs = count_user_subjs(r_tmp->hash->first);
74668+
74669+ r_tmp->subj_hash_size = num_subjs;
74670+ r_tmp->subj_hash =
74671+ (struct acl_subject_label **)
74672+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74673+
74674+ if (!r_tmp->subj_hash)
74675+ return -ENOMEM;
74676+
74677+ err = copy_user_allowedips(r_tmp);
74678+ if (err)
74679+ return err;
74680+
74681+ /* copy domain info */
74682+ if (r_tmp->domain_children != NULL) {
74683+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74684+ if (domainlist == NULL)
74685+ return -ENOMEM;
74686+
74687+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74688+ return -EFAULT;
74689+
74690+ r_tmp->domain_children = domainlist;
74691+ }
74692+
74693+ err = copy_user_transitions(r_tmp);
74694+ if (err)
74695+ return err;
74696+
74697+ memset(r_tmp->subj_hash, 0,
74698+ r_tmp->subj_hash_size *
74699+ sizeof (struct acl_subject_label *));
74700+
74701+ /* acquire the list of subjects, then NULL out
74702+ the list prior to parsing the subjects for this role,
74703+ as during this parsing the list is replaced with a list
74704+ of *nested* subjects for the role
74705+ */
74706+ subj_list = r_tmp->hash->first;
74707+
74708+ /* set nested subject list to null */
74709+ r_tmp->hash->first = NULL;
74710+
74711+ err = copy_user_subjs(subj_list, r_tmp);
74712+
74713+ if (err)
74714+ return err;
74715+
74716+ insert_acl_role_label(r_tmp);
74717+ }
74718+
74719+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74720+ return -EINVAL;
74721+
74722+ return err;
74723+}
74724+
74725+static int gracl_reload_apply_policies(void *reload)
74726+{
74727+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74728+ struct task_struct *task, *task2;
74729+ struct acl_role_label *role, *rtmp;
74730+ struct acl_subject_label *subj;
74731+ const struct cred *cred;
74732+ int role_applied;
74733+ int ret = 0;
74734+
74735+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74736+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74737+
74738+ /* first make sure we'll be able to apply the new policy cleanly */
74739+ do_each_thread(task2, task) {
74740+ if (task->exec_file == NULL)
74741+ continue;
74742+ role_applied = 0;
74743+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74744+ /* preserve special roles */
74745+ FOR_EACH_ROLE_START(role)
74746+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74747+ rtmp = task->role;
74748+ task->role = role;
74749+ role_applied = 1;
74750+ break;
74751+ }
74752+ FOR_EACH_ROLE_END(role)
74753+ }
74754+ if (!role_applied) {
74755+ cred = __task_cred(task);
74756+ rtmp = task->role;
74757+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74758+ }
74759+		/* this handles non-nested inherited subjects; nested subjects
74760+		   will still be dropped for now */
74761+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74762+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74763+ /* change the role back so that we've made no modifications to the policy */
74764+ task->role = rtmp;
74765+
74766+ if (subj == NULL || task->tmpacl == NULL) {
74767+ ret = -EINVAL;
74768+ goto out;
74769+ }
74770+ } while_each_thread(task2, task);
74771+
74772+ /* now actually apply the policy */
74773+
74774+ do_each_thread(task2, task) {
74775+ if (task->exec_file) {
74776+ role_applied = 0;
74777+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74778+ /* preserve special roles */
74779+ FOR_EACH_ROLE_START(role)
74780+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74781+ task->role = role;
74782+ role_applied = 1;
74783+ break;
74784+ }
74785+ FOR_EACH_ROLE_END(role)
74786+ }
74787+ if (!role_applied) {
74788+ cred = __task_cred(task);
74789+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74790+ }
74791+			/* this handles non-nested inherited subjects; nested subjects
74792+			   will still be dropped for now */
74793+ if (!reload_state->oldmode && task->inherited)
74794+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74795+ else {
74796+ /* looked up and tagged to the task previously */
74797+ subj = task->tmpacl;
74798+ }
74799+ /* subj will be non-null */
74800+ __gr_apply_subject_to_task(polstate, task, subj);
74801+ if (reload_state->oldmode) {
74802+ task->acl_role_id = 0;
74803+ task->acl_sp_role = 0;
74804+ task->inherited = 0;
74805+ }
74806+ } else {
74807+ // it's a kernel process
74808+ task->role = polstate->kernel_role;
74809+ task->acl = polstate->kernel_role->root_label;
74810+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74811+ task->acl->mode &= ~GR_PROCFIND;
74812+#endif
74813+ }
74814+ } while_each_thread(task2, task);
74815+
74816+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74817+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74818+
74819+out:
74820+
74821+ return ret;
74822+}
74823+
74824+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74825+{
74826+ struct gr_reload_state new_reload_state = { };
74827+ int err;
74828+
74829+ new_reload_state.oldpolicy_ptr = polstate;
74830+ new_reload_state.oldalloc_ptr = current_alloc_state;
74831+ new_reload_state.oldmode = oldmode;
74832+
74833+ current_alloc_state = &new_reload_state.newalloc;
74834+ polstate = &new_reload_state.newpolicy;
74835+
74836+ /* everything relevant is now saved off, copy in the new policy */
74837+ if (init_variables(args, true)) {
74838+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74839+ err = -ENOMEM;
74840+ goto error;
74841+ }
74842+
74843+ err = copy_user_acl(args);
74844+ free_init_variables();
74845+ if (err)
74846+ goto error;
74847+	/* the new policy is copied in, with the old policy available via saved_state.
74848+	   First go through applying roles, making sure to preserve special roles,
74849+	   then apply new subjects, making sure to preserve inherited and nested
74850+	   subjects (though currently only inherited subjects will be preserved)
74851+	*/
74852+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74853+ if (err)
74854+ goto error;
74855+
74856+ /* we've now applied the new policy, so restore the old policy state to free it */
74857+ polstate = &new_reload_state.oldpolicy;
74858+ current_alloc_state = &new_reload_state.oldalloc;
74859+ free_variables(true);
74860+
74861+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74862+ to running_polstate/current_alloc_state inside stop_machine
74863+ */
74864+ err = 0;
74865+ goto out;
74866+error:
74867+	/* if loading the new policy failed, we just keep the previous
74868+	   policy set around
74869+	*/
74870+ free_variables(true);
74871+
74872+ /* doesn't affect runtime, but maintains consistent state */
74873+out:
74874+ polstate = new_reload_state.oldpolicy_ptr;
74875+ current_alloc_state = new_reload_state.oldalloc_ptr;
74876+
74877+ return err;
74878+}
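+
+/*
+ * The reload above is a build/swap/free sequence: the live polstate and
+ * allocation-state pointers are saved, pointed at fresh state while the
+ * new policy is copied in from userland, and the switch-over of every
+ * task happens inside stop_machine() so no CPU can be running policy
+ * checks mid-swap.  In outline:
+ *
+ *	save old polstate/current_alloc_state pointers;
+ *	polstate = &new;                    // build new policy off to the side
+ *	if (init_variables() || copy_user_acl())
+ *		goto error;                 // free new, old stays live
+ *	stop_machine(gracl_reload_apply_policies, ...);
+ *	free old policy;                    // only after the swap
+ */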
74879+
74880+static int
74881+gracl_init(struct gr_arg *args)
74882+{
74883+ int error = 0;
74884+
74885+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74886+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74887+
74888+ if (init_variables(args, false)) {
74889+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74890+ error = -ENOMEM;
74891+ goto out;
74892+ }
74893+
74894+ error = copy_user_acl(args);
74895+ free_init_variables();
74896+ if (error)
74897+ goto out;
74898+
74899+ error = gr_set_acls(0);
74900+ if (error)
74901+ goto out;
74902+
74903+ gr_enable_rbac_system();
74904+
74905+ return 0;
74906+
74907+out:
74908+ free_variables(false);
74909+ return error;
74910+}
74911+
74912+static int
74913+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74914+ unsigned char **sum)
74915+{
74916+ struct acl_role_label *r;
74917+ struct role_allowed_ip *ipp;
74918+ struct role_transition *trans;
74919+ unsigned int i;
74920+ int found = 0;
74921+ u32 curr_ip = current->signal->curr_ip;
74922+
74923+ current->signal->saved_ip = curr_ip;
74924+
74925+ /* check transition table */
74926+
74927+ for (trans = current->role->transitions; trans; trans = trans->next) {
74928+ if (!strcmp(rolename, trans->rolename)) {
74929+ found = 1;
74930+ break;
74931+ }
74932+ }
74933+
74934+ if (!found)
74935+ return 0;
74936+
74937+ /* handle special roles that do not require authentication
74938+ and check ip */
74939+
74940+ FOR_EACH_ROLE_START(r)
74941+ if (!strcmp(rolename, r->rolename) &&
74942+ (r->roletype & GR_ROLE_SPECIAL)) {
74943+ found = 0;
74944+ if (r->allowed_ips != NULL) {
74945+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
74946+ if ((ntohl(curr_ip) & ipp->netmask) ==
74947+ (ntohl(ipp->addr) & ipp->netmask))
74948+ found = 1;
74949+ }
74950+ } else
74951+ found = 2;
74952+ if (!found)
74953+ return 0;
74954+
74955+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
74956+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
74957+ *salt = NULL;
74958+ *sum = NULL;
74959+ return 1;
74960+ }
74961+ }
74962+ FOR_EACH_ROLE_END(r)
74963+
74964+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74965+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
74966+ *salt = polstate->acl_special_roles[i]->salt;
74967+ *sum = polstate->acl_special_roles[i]->sum;
74968+ return 1;
74969+ }
74970+ }
74971+
74972+ return 0;
74973+}
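+
+/*
+ * The allowed-ip test above is a plain masked compare.  Example: an entry
+ * for 192.168.1.0/24 stores addr = 192.168.1.0 with a host-order netmask
+ * of 0xffffff00, so a connection from 192.168.1.42 matches because
+ * (192.168.1.42 & 0xffffff00) == (192.168.1.0 & 0xffffff00), while
+ * 10.0.0.5 does not.  found == 2 marks roles with no ip restriction.
+ */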
74974+
74975+int gr_check_secure_terminal(struct task_struct *task)
74976+{
74977+ struct task_struct *p, *p2, *p3;
74978+ struct files_struct *files;
74979+ struct fdtable *fdt;
74980+ struct file *our_file = NULL, *file;
74981+ int i;
74982+
74983+ if (task->signal->tty == NULL)
74984+ return 1;
74985+
74986+ files = get_files_struct(task);
74987+ if (files != NULL) {
74988+ rcu_read_lock();
74989+ fdt = files_fdtable(files);
74990+ for (i=0; i < fdt->max_fds; i++) {
74991+ file = fcheck_files(files, i);
74992+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
74993+ get_file(file);
74994+ our_file = file;
74995+ }
74996+ }
74997+ rcu_read_unlock();
74998+ put_files_struct(files);
74999+ }
75000+
75001+ if (our_file == NULL)
75002+ return 1;
75003+
75004+ read_lock(&tasklist_lock);
75005+ do_each_thread(p2, p) {
75006+ files = get_files_struct(p);
75007+ if (files == NULL ||
75008+ (p->signal && p->signal->tty == task->signal->tty)) {
75009+ if (files != NULL)
75010+ put_files_struct(files);
75011+ continue;
75012+ }
75013+ rcu_read_lock();
75014+ fdt = files_fdtable(files);
75015+ for (i=0; i < fdt->max_fds; i++) {
75016+ file = fcheck_files(files, i);
75017+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75018+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75019+ p3 = task;
75020+ while (task_pid_nr(p3) > 0) {
75021+ if (p3 == p)
75022+ break;
75023+ p3 = p3->real_parent;
75024+ }
75025+ if (p3 == p)
75026+ break;
75027+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75028+ gr_handle_alertkill(p);
75029+ rcu_read_unlock();
75030+ put_files_struct(files);
75031+ read_unlock(&tasklist_lock);
75032+ fput(our_file);
75033+ return 0;
75034+ }
75035+ }
75036+ rcu_read_unlock();
75037+ put_files_struct(files);
75038+ } while_each_thread(p2, p);
75039+ read_unlock(&tasklist_lock);
75040+
75041+ fput(our_file);
75042+ return 1;
75043+}
75044+
75045+ssize_t
75046+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75047+{
75048+ struct gr_arg_wrapper uwrap;
75049+ unsigned char *sprole_salt = NULL;
75050+ unsigned char *sprole_sum = NULL;
75051+ int error = 0;
75052+ int error2 = 0;
75053+ size_t req_count = 0;
75054+ unsigned char oldmode = 0;
75055+
75056+ mutex_lock(&gr_dev_mutex);
75057+
75058+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75059+ error = -EPERM;
75060+ goto out;
75061+ }
75062+
75063+#ifdef CONFIG_COMPAT
75064+ pax_open_kernel();
75065+ if (is_compat_task()) {
75066+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75067+ copy_gr_arg = &copy_gr_arg_compat;
75068+ copy_acl_object_label = &copy_acl_object_label_compat;
75069+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75070+ copy_acl_role_label = &copy_acl_role_label_compat;
75071+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75072+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75073+ copy_role_transition = &copy_role_transition_compat;
75074+ copy_sprole_pw = &copy_sprole_pw_compat;
75075+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75076+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75077+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75078+ } else {
75079+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75080+ copy_gr_arg = &copy_gr_arg_normal;
75081+ copy_acl_object_label = &copy_acl_object_label_normal;
75082+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75083+ copy_acl_role_label = &copy_acl_role_label_normal;
75084+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75085+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75086+ copy_role_transition = &copy_role_transition_normal;
75087+ copy_sprole_pw = &copy_sprole_pw_normal;
75088+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75089+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75090+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75091+ }
75092+ pax_close_kernel();
75093+#endif
75094+
75095+ req_count = get_gr_arg_wrapper_size();
75096+
75097+ if (count != req_count) {
75098+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75099+ error = -EINVAL;
75100+ goto out;
75101+ }
75102+
75104+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75105+ gr_auth_expires = 0;
75106+ gr_auth_attempts = 0;
75107+ }
75108+
75109+ error = copy_gr_arg_wrapper(buf, &uwrap);
75110+ if (error)
75111+ goto out;
75112+
75113+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75114+ if (error)
75115+ goto out;
75116+
75117+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75118+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75119+ time_after(gr_auth_expires, get_seconds())) {
75120+ error = -EBUSY;
75121+ goto out;
75122+ }
75123+
75124+	/* if a non-root user is trying to do anything other than use a
75125+	   special role, do not attempt authentication and do not count it
75126+	   towards authentication lockout
75127+	*/
75128+
75129+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75130+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75131+ gr_is_global_nonroot(current_uid())) {
75132+ error = -EPERM;
75133+ goto out;
75134+ }
75135+
75136+ /* ensure pw and special role name are null terminated */
75137+
75138+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75139+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75140+
75141+	/* Okay.
75142+	 * We have enough of the argument structure (we have yet
75143+	 * to copy_from_user the tables themselves). Copy the tables
75144+	 * only if we need them, i.e. for loading operations. */
75145+
75146+ switch (gr_usermode->mode) {
75147+ case GR_STATUS:
75148+ if (gr_acl_is_enabled()) {
75149+ error = 1;
75150+ if (!gr_check_secure_terminal(current))
75151+ error = 3;
75152+ } else
75153+ error = 2;
75154+ goto out;
75155+ case GR_SHUTDOWN:
75156+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75157+ stop_machine(gr_rbac_disable, NULL, NULL);
75158+ free_variables(false);
75159+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75160+ memset(gr_system_salt, 0, GR_SALT_LEN);
75161+ memset(gr_system_sum, 0, GR_SHA_LEN);
75162+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75163+ } else if (gr_acl_is_enabled()) {
75164+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75165+ error = -EPERM;
75166+ } else {
75167+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75168+ error = -EAGAIN;
75169+ }
75170+ break;
75171+ case GR_ENABLE:
75172+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75173+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75174+ else {
75175+ if (gr_acl_is_enabled())
75176+ error = -EAGAIN;
75177+ else
75178+ error = error2;
75179+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75180+ }
75181+ break;
75182+ case GR_OLDRELOAD:
75183+ oldmode = 1;
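+		/* fall through: GR_OLDRELOAD is GR_RELOAD with oldmode set */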
75184+ case GR_RELOAD:
75185+ if (!gr_acl_is_enabled()) {
75186+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75187+ error = -EAGAIN;
75188+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75189+ error2 = gracl_reload(gr_usermode, oldmode);
75190+ if (!error2)
75191+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75192+ else {
75193+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75194+ error = error2;
75195+ }
75196+ } else {
75197+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75198+ error = -EPERM;
75199+ }
75200+ break;
75201+ case GR_SEGVMOD:
75202+ if (unlikely(!gr_acl_is_enabled())) {
75203+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75204+ error = -EAGAIN;
75205+ break;
75206+ }
75207+
75208+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75209+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75210+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75211+ struct acl_subject_label *segvacl;
75212+ segvacl =
75213+ lookup_acl_subj_label(gr_usermode->segv_inode,
75214+ gr_usermode->segv_device,
75215+ current->role);
75216+ if (segvacl) {
75217+ segvacl->crashes = 0;
75218+ segvacl->expires = 0;
75219+ }
75220+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75221+ gr_remove_uid(gr_usermode->segv_uid);
75222+ }
75223+ } else {
75224+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75225+ error = -EPERM;
75226+ }
75227+ break;
75228+ case GR_SPROLE:
75229+ case GR_SPROLEPAM:
75230+ if (unlikely(!gr_acl_is_enabled())) {
75231+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75232+ error = -EAGAIN;
75233+ break;
75234+ }
75235+
75236+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75237+ current->role->expires = 0;
75238+ current->role->auth_attempts = 0;
75239+ }
75240+
75241+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75242+ time_after(current->role->expires, get_seconds())) {
75243+ error = -EBUSY;
75244+ goto out;
75245+ }
75246+
75247+ if (lookup_special_role_auth
75248+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75249+ && ((!sprole_salt && !sprole_sum)
75250+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75251+ char *p = "";
75252+ assign_special_role(gr_usermode->sp_role);
75253+ read_lock(&tasklist_lock);
75254+ if (current->real_parent)
75255+ p = current->real_parent->role->rolename;
75256+ read_unlock(&tasklist_lock);
75257+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75258+ p, acl_sp_role_value);
75259+ } else {
75260+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75261+ error = -EPERM;
75262+			if (!(current->role->auth_attempts++))
75263+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75264+
75265+ goto out;
75266+ }
75267+ break;
75268+ case GR_UNSPROLE:
75269+ if (unlikely(!gr_acl_is_enabled())) {
75270+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75271+ error = -EAGAIN;
75272+ break;
75273+ }
75274+
75275+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75276+ char *p = "";
75277+ int i = 0;
75278+
75279+ read_lock(&tasklist_lock);
75280+ if (current->real_parent) {
75281+ p = current->real_parent->role->rolename;
75282+ i = current->real_parent->acl_role_id;
75283+ }
75284+ read_unlock(&tasklist_lock);
75285+
75286+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75287+ gr_set_acls(1);
75288+ } else {
75289+ error = -EPERM;
75290+ goto out;
75291+ }
75292+ break;
75293+ default:
75294+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75295+ error = -EINVAL;
75296+ break;
75297+ }
75298+
75299+ if (error != -EPERM)
75300+ goto out;
75301+
75302+	if (!(gr_auth_attempts++))
75303+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75304+
75305+ out:
75306+ mutex_unlock(&gr_dev_mutex);
75307+
75308+ if (!error)
75309+ error = req_count;
75310+
75311+ return error;
75312+}
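+
+/*
+ * For illustration, a minimal userland sketch of driving this handler
+ * (device node name and field usage as a loader such as gradm would use
+ * them; error handling omitted, so treat it as a sketch rather than the
+ * tool's actual code):
+ *
+ *	struct gr_arg arg = { .mode = GR_STATUS };
+ *	struct gr_arg_wrapper wrap = {
+ *		.arg = &arg,
+ *		.version = GRSECURITY_VERSION,
+ *		.size = sizeof(struct gr_arg),
+ *	};
+ *	int fd = open("/dev/grsec", O_WRONLY);
+ *	ssize_t ret = write(fd, &wrap, sizeof(wrap));
+ *	// GR_STATUS: ret 1 = enabled, 2 = disabled, 3 = insecure terminal
+ *
+ * The write must be exactly get_gr_arg_wrapper_size() bytes or it is
+ * rejected with -EINVAL before anything else is copied in.
+ */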
75313+
75314+int
75315+gr_set_acls(const int type)
75316+{
75317+ struct task_struct *task, *task2;
75318+ struct acl_role_label *role = current->role;
75319+ struct acl_subject_label *subj;
75320+ __u16 acl_role_id = current->acl_role_id;
75321+ const struct cred *cred;
75322+ int ret;
75323+
75324+ rcu_read_lock();
75325+ read_lock(&tasklist_lock);
75326+ read_lock(&grsec_exec_file_lock);
75327+ do_each_thread(task2, task) {
75328+		/* check to see if we're called from the exit handler;
75329+		   if so, only replace ACLs that have inherited the admin
75330+		   ACL */
75331+
75332+ if (type && (task->role != role ||
75333+ task->acl_role_id != acl_role_id))
75334+ continue;
75335+
75336+ task->acl_role_id = 0;
75337+ task->acl_sp_role = 0;
75338+ task->inherited = 0;
75339+
75340+ if (task->exec_file) {
75341+ cred = __task_cred(task);
75342+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75343+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75344+ if (subj == NULL) {
75345+ ret = -EINVAL;
75346+ read_unlock(&grsec_exec_file_lock);
75347+ read_unlock(&tasklist_lock);
75348+ rcu_read_unlock();
75349+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75350+ return ret;
75351+ }
75352+ __gr_apply_subject_to_task(polstate, task, subj);
75353+ } else {
75354+ // it's a kernel process
75355+ task->role = polstate->kernel_role;
75356+ task->acl = polstate->kernel_role->root_label;
75357+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75358+ task->acl->mode &= ~GR_PROCFIND;
75359+#endif
75360+ }
75361+ } while_each_thread(task2, task);
75362+ read_unlock(&grsec_exec_file_lock);
75363+ read_unlock(&tasklist_lock);
75364+ rcu_read_unlock();
75365+
75366+ return 0;
75367+}
75368diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75369new file mode 100644
75370index 0000000..39645c9
75371--- /dev/null
75372+++ b/grsecurity/gracl_res.c
75373@@ -0,0 +1,68 @@
75374+#include <linux/kernel.h>
75375+#include <linux/sched.h>
75376+#include <linux/gracl.h>
75377+#include <linux/grinternal.h>
75378+
75379+static const char *restab_log[] = {
75380+ [RLIMIT_CPU] = "RLIMIT_CPU",
75381+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75382+ [RLIMIT_DATA] = "RLIMIT_DATA",
75383+ [RLIMIT_STACK] = "RLIMIT_STACK",
75384+ [RLIMIT_CORE] = "RLIMIT_CORE",
75385+ [RLIMIT_RSS] = "RLIMIT_RSS",
75386+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75387+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75388+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75389+ [RLIMIT_AS] = "RLIMIT_AS",
75390+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75391+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75392+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75393+ [RLIMIT_NICE] = "RLIMIT_NICE",
75394+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75395+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75396+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75397+};
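+
+/*
+ * The designated initializers above leave any unlisted resource index
+ * NULL, which is what the !restab_log[res] "not yet supported" check in
+ * gr_log_resource() below relies on if a resource without a name here is
+ * ever passed in.
+ */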
75398+
75399+void
75400+gr_log_resource(const struct task_struct *task,
75401+ const int res, const unsigned long wanted, const int gt)
75402+{
75403+ const struct cred *cred;
75404+ unsigned long rlim;
75405+
75406+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75407+ return;
75408+
75409+ // not yet supported resource
75410+ if (unlikely(!restab_log[res]))
75411+ return;
75412+
75413+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75414+ rlim = task_rlimit_max(task, res);
75415+ else
75416+ rlim = task_rlimit(task, res);
75417+
75418+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75419+ return;
75420+
75421+ rcu_read_lock();
75422+ cred = __task_cred(task);
75423+
75424+ if (res == RLIMIT_NPROC &&
75425+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75426+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75427+ goto out_rcu_unlock;
75428+ else if (res == RLIMIT_MEMLOCK &&
75429+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75430+ goto out_rcu_unlock;
75431+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75432+ goto out_rcu_unlock;
75433+ rcu_read_unlock();
75434+
75435+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75436+
75437+ return;
75438+out_rcu_unlock:
75439+ rcu_read_unlock();
75440+ return;
75441+}
75442diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75443new file mode 100644
75444index 0000000..218b66b
75445--- /dev/null
75446+++ b/grsecurity/gracl_segv.c
75447@@ -0,0 +1,324 @@
75448+#include <linux/kernel.h>
75449+#include <linux/mm.h>
75450+#include <asm/uaccess.h>
75451+#include <asm/errno.h>
75452+#include <asm/mman.h>
75453+#include <net/sock.h>
75454+#include <linux/file.h>
75455+#include <linux/fs.h>
75456+#include <linux/net.h>
75457+#include <linux/in.h>
75458+#include <linux/slab.h>
75459+#include <linux/types.h>
75460+#include <linux/sched.h>
75461+#include <linux/timer.h>
75462+#include <linux/gracl.h>
75463+#include <linux/grsecurity.h>
75464+#include <linux/grinternal.h>
75465+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75466+#include <linux/magic.h>
75467+#include <linux/pagemap.h>
75468+#include "../fs/btrfs/async-thread.h"
75469+#include "../fs/btrfs/ctree.h"
75470+#include "../fs/btrfs/btrfs_inode.h"
75471+#endif
75472+
75473+static struct crash_uid *uid_set;
75474+static unsigned short uid_used;
75475+static DEFINE_SPINLOCK(gr_uid_lock);
75476+extern rwlock_t gr_inode_lock;
75477+extern struct acl_subject_label *
75478+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75479+ struct acl_role_label *role);
75480+
75481+static inline dev_t __get_dev(const struct dentry *dentry)
75482+{
75483+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75484+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75485+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75486+ else
75487+#endif
75488+ return dentry->d_sb->s_dev;
75489+}
75490+
75491+static inline u64 __get_ino(const struct dentry *dentry)
75492+{
75493+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75494+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75495+ return btrfs_ino(dentry->d_inode);
75496+ else
75497+#endif
75498+ return dentry->d_inode->i_ino;
75499+}
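+
+/*
+ * btrfs note: files in different subvolumes share the superblock's s_dev
+ * but have their own inode numbering, so the helpers above substitute the
+ * subvolume's anonymous device and btrfs_ino() to keep (inode, dev) pairs
+ * unique for the lookups below.
+ */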
75500+
75501+int
75502+gr_init_uidset(void)
75503+{
75504+ uid_set =
75505+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75506+ uid_used = 0;
75507+
75508+ return uid_set ? 1 : 0;
75509+}
75510+
75511+void
75512+gr_free_uidset(void)
75513+{
75514+ if (uid_set) {
75515+ struct crash_uid *tmpset;
75516+ spin_lock(&gr_uid_lock);
75517+ tmpset = uid_set;
75518+ uid_set = NULL;
75519+ uid_used = 0;
75520+ spin_unlock(&gr_uid_lock);
75521+		kfree(tmpset);
75523+ }
75524+
75525+ return;
75526+}
75527+
75528+int
75529+gr_find_uid(const uid_t uid)
75530+{
75531+ struct crash_uid *tmp = uid_set;
75532+ uid_t buid;
75533+ int low = 0, high = uid_used - 1, mid;
75534+
75535+ while (high >= low) {
75536+ mid = (low + high) >> 1;
75537+ buid = tmp[mid].uid;
75538+ if (buid == uid)
75539+ return mid;
75540+ if (buid > uid)
75541+ high = mid - 1;
75542+ if (buid < uid)
75543+ low = mid + 1;
75544+ }
75545+
75546+ return -1;
75547+}
75548+
75549+static __inline__ void
75550+gr_insertsort(void)
75551+{
75552+ unsigned short i, j;
75553+ struct crash_uid index;
75554+
75555+ for (i = 1; i < uid_used; i++) {
75556+ index = uid_set[i];
75557+ j = i;
75558+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75559+ uid_set[j] = uid_set[j - 1];
75560+ j--;
75561+ }
75562+ uid_set[j] = index;
75563+ }
75564+
75565+ return;
75566+}
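+
+/*
+ * uid_set is kept sorted so gr_find_uid() can binary-search it: new
+ * entries are appended and gr_insertsort() restores order.  Insertion
+ * sort fits well here because the array is already sorted apart from the
+ * single appended element, making the pass effectively O(n).
+ */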
75567+
75568+static __inline__ void
75569+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75570+{
75571+ int loc;
75572+ uid_t uid = GR_GLOBAL_UID(kuid);
75573+
75574+ if (uid_used == GR_UIDTABLE_MAX)
75575+ return;
75576+
75577+ loc = gr_find_uid(uid);
75578+
75579+ if (loc >= 0) {
75580+ uid_set[loc].expires = expires;
75581+ return;
75582+ }
75583+
75584+ uid_set[uid_used].uid = uid;
75585+ uid_set[uid_used].expires = expires;
75586+ uid_used++;
75587+
75588+ gr_insertsort();
75589+
75590+ return;
75591+}
75592+
75593+void
75594+gr_remove_uid(const unsigned short loc)
75595+{
75596+ unsigned short i;
75597+
75598+ for (i = loc + 1; i < uid_used; i++)
75599+ uid_set[i - 1] = uid_set[i];
75600+
75601+ uid_used--;
75602+
75603+ return;
75604+}
75605+
75606+int
75607+gr_check_crash_uid(const kuid_t kuid)
75608+{
75609+ int loc;
75610+ int ret = 0;
75611+ uid_t uid;
75612+
75613+ if (unlikely(!gr_acl_is_enabled()))
75614+ return 0;
75615+
75616+ uid = GR_GLOBAL_UID(kuid);
75617+
75618+ spin_lock(&gr_uid_lock);
75619+ loc = gr_find_uid(uid);
75620+
75621+ if (loc < 0)
75622+ goto out_unlock;
75623+
75624+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75625+ gr_remove_uid(loc);
75626+ else
75627+ ret = 1;
75628+
75629+out_unlock:
75630+ spin_unlock(&gr_uid_lock);
75631+ return ret;
75632+}
75633+
75634+static __inline__ int
75635+proc_is_setxid(const struct cred *cred)
75636+{
75637+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75638+ !uid_eq(cred->uid, cred->fsuid))
75639+ return 1;
75640+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75641+ !gid_eq(cred->gid, cred->fsgid))
75642+ return 1;
75643+
75644+ return 0;
75645+}
75646+
75647+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75648+
75649+void
75650+gr_handle_crash(struct task_struct *task, const int sig)
75651+{
75652+ struct acl_subject_label *curr;
75653+ struct task_struct *tsk, *tsk2;
75654+ const struct cred *cred;
75655+ const struct cred *cred2;
75656+
75657+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75658+ return;
75659+
75660+ if (unlikely(!gr_acl_is_enabled()))
75661+ return;
75662+
75663+ curr = task->acl;
75664+
75665+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75666+ return;
75667+
75668+ if (time_before_eq(curr->expires, get_seconds())) {
75669+ curr->expires = 0;
75670+ curr->crashes = 0;
75671+ }
75672+
75673+ curr->crashes++;
75674+
75675+ if (!curr->expires)
75676+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75677+
75678+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75679+ time_after(curr->expires, get_seconds())) {
75680+ rcu_read_lock();
75681+ cred = __task_cred(task);
75682+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75683+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75684+ spin_lock(&gr_uid_lock);
75685+ gr_insert_uid(cred->uid, curr->expires);
75686+ spin_unlock(&gr_uid_lock);
75687+ curr->expires = 0;
75688+ curr->crashes = 0;
75689+ read_lock(&tasklist_lock);
75690+ do_each_thread(tsk2, tsk) {
75691+ cred2 = __task_cred(tsk);
75692+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75693+ gr_fake_force_sig(SIGKILL, tsk);
75694+ } while_each_thread(tsk2, tsk);
75695+ read_unlock(&tasklist_lock);
75696+ } else {
75697+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75698+ read_lock(&tasklist_lock);
75699+ read_lock(&grsec_exec_file_lock);
75700+ do_each_thread(tsk2, tsk) {
75701+ if (likely(tsk != task)) {
75702+ // if this thread has the same subject as the one that triggered
75703+ // RES_CRASH and it's the same binary, kill it
75704+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75705+ gr_fake_force_sig(SIGKILL, tsk);
75706+ }
75707+ } while_each_thread(tsk2, tsk);
75708+ read_unlock(&grsec_exec_file_lock);
75709+ read_unlock(&tasklist_lock);
75710+ }
75711+ rcu_read_unlock();
75712+ }
75713+
75714+ return;
75715+}
75716+
75717+int
75718+gr_check_crash_exec(const struct file *filp)
75719+{
75720+ struct acl_subject_label *curr;
75721+ struct dentry *dentry;
75722+
75723+ if (unlikely(!gr_acl_is_enabled()))
75724+ return 0;
75725+
75726+ read_lock(&gr_inode_lock);
75727+ dentry = filp->f_path.dentry;
75728+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75729+ current->role);
75730+ read_unlock(&gr_inode_lock);
75731+
75732+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75733+ (!curr->crashes && !curr->expires))
75734+ return 0;
75735+
75736+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75737+ time_after(curr->expires, get_seconds()))
75738+ return 1;
75739+ else if (time_before_eq(curr->expires, get_seconds())) {
75740+ curr->crashes = 0;
75741+ curr->expires = 0;
75742+ }
75743+
75744+ return 0;
75745+}
75746+
75747+void
75748+gr_handle_alertkill(struct task_struct *task)
75749+{
75750+ struct acl_subject_label *curracl;
75751+ __u32 curr_ip;
75752+ struct task_struct *p, *p2;
75753+
75754+ if (unlikely(!gr_acl_is_enabled()))
75755+ return;
75756+
75757+ curracl = task->acl;
75758+ curr_ip = task->signal->curr_ip;
75759+
75760+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75761+ read_lock(&tasklist_lock);
75762+ do_each_thread(p2, p) {
75763+ if (p->signal->curr_ip == curr_ip)
75764+ gr_fake_force_sig(SIGKILL, p);
75765+ } while_each_thread(p2, p);
75766+ read_unlock(&tasklist_lock);
75767+ } else if (curracl->mode & GR_KILLPROC)
75768+ gr_fake_force_sig(SIGKILL, task);
75769+
75770+ return;
75771+}
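
For illustration only: the segvguard code above keeps crashing setuid/setgid users in a small sorted array -- gr_find_uid() binary-searches it, gr_insert_uid() appends and restores order, and entries are dropped once their expiry passes. Below is a minimal userspace sketch of that table discipline; the names, the table size, and the swap-based re-sort are assumptions of the sketch, not part of the patch.

/* illustrative sketch, not the kernel code */
#include <stdio.h>
#include <time.h>

#define UIDTABLE_MAX 256   /* hypothetical size; the patch uses GR_UIDTABLE_MAX */

struct crash_uid { unsigned int uid; time_t expires; };

static struct crash_uid uid_set[UIDTABLE_MAX];
static unsigned short uid_used;

/* binary search over the sorted array; returns index or -1 */
static int find_uid(unsigned int uid)
{
	int low = 0, high = (int)uid_used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* append, then restore sorted order, as gr_insert_uid()/gr_insertsort() do */
static void insert_uid(unsigned int uid, time_t expires)
{
	int loc = find_uid(uid);
	unsigned short j;

	if (loc >= 0) {              /* already present: refresh expiry */
		uid_set[loc].expires = expires;
		return;
	}
	if (uid_used == UIDTABLE_MAX)
		return;              /* table full: silently drop */

	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	uid_used++;

	for (j = uid_used - 1; j > 0 && uid_set[j - 1].uid > uid_set[j].uid; j--) {
		struct crash_uid tmp = uid_set[j];
		uid_set[j] = uid_set[j - 1];
		uid_set[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, time(NULL) + 60);
	insert_uid(33, time(NULL) + 60);
	printf("uid 33 at index %d, uid 1000 at index %d\n",
	       find_uid(33), find_uid(1000));
	return 0;
}
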
75772diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75773new file mode 100644
75774index 0000000..6b0c9cc
75775--- /dev/null
75776+++ b/grsecurity/gracl_shm.c
75777@@ -0,0 +1,40 @@
75778+#include <linux/kernel.h>
75779+#include <linux/mm.h>
75780+#include <linux/sched.h>
75781+#include <linux/file.h>
75782+#include <linux/ipc.h>
75783+#include <linux/gracl.h>
75784+#include <linux/grsecurity.h>
75785+#include <linux/grinternal.h>
75786+
75787+int
75788+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75789+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75790+{
75791+ struct task_struct *task;
75792+
75793+ if (!gr_acl_is_enabled())
75794+ return 1;
75795+
75796+ rcu_read_lock();
75797+ read_lock(&tasklist_lock);
75798+
75799+ task = find_task_by_vpid(shm_cprid);
75800+
75801+ if (unlikely(!task))
75802+ task = find_task_by_vpid(shm_lapid);
75803+
75804+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75805+ (task_pid_nr(task) == shm_lapid)) &&
75806+ (task->acl->mode & GR_PROTSHM) &&
75807+ (task->acl != current->acl))) {
75808+ read_unlock(&tasklist_lock);
75809+ rcu_read_unlock();
75810+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75811+ return 0;
75812+ }
75813+ read_unlock(&tasklist_lock);
75814+ rcu_read_unlock();
75815+
75816+ return 1;
75817+}
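
For illustration only: the core of gr_handle_shmat() above is a PID-reuse test -- the creator's PID only still identifies the creator if the task currently living at that PID was started no later than the segment was created. A minimal userspace sketch of that comparison follows; the struct and function names are illustrative, not from the patch.

/* illustrative sketch, not the kernel code */
#include <stdio.h>

struct fake_task { unsigned long long start_time; };

static int pid_still_creator(const struct fake_task *t,
			     unsigned long long shm_createtime)
{
	/* started before (or when) the segment appeared: same task */
	return t->start_time <= shm_createtime;
}

int main(void)
{
	struct fake_task original = { .start_time = 100 };
	struct fake_task recycled = { .start_time = 900 };

	printf("original task: %s\n",
	       pid_still_creator(&original, 500) ? "creator" : "pid reused");
	printf("recycled task: %s\n",
	       pid_still_creator(&recycled, 500) ? "creator" : "pid reused");
	return 0;
}
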
75818diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75819new file mode 100644
75820index 0000000..bc0be01
75821--- /dev/null
75822+++ b/grsecurity/grsec_chdir.c
75823@@ -0,0 +1,19 @@
75824+#include <linux/kernel.h>
75825+#include <linux/sched.h>
75826+#include <linux/fs.h>
75827+#include <linux/file.h>
75828+#include <linux/grsecurity.h>
75829+#include <linux/grinternal.h>
75830+
75831+void
75832+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75833+{
75834+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75835+ if ((grsec_enable_chdir && grsec_enable_group &&
75836+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75837+ !grsec_enable_group)) {
75838+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75839+ }
75840+#endif
75841+ return;
75842+}
75843diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75844new file mode 100644
75845index 0000000..114ea4f
75846--- /dev/null
75847+++ b/grsecurity/grsec_chroot.c
75848@@ -0,0 +1,467 @@
75849+#include <linux/kernel.h>
75850+#include <linux/module.h>
75851+#include <linux/sched.h>
75852+#include <linux/file.h>
75853+#include <linux/fs.h>
75854+#include <linux/mount.h>
75855+#include <linux/types.h>
75856+#include "../fs/mount.h"
75857+#include <linux/grsecurity.h>
75858+#include <linux/grinternal.h>
75859+
75860+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75861+int gr_init_ran;
75862+#endif
75863+
75864+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75865+{
75866+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75867+ struct dentry *tmpd = dentry;
75868+
75869+ read_seqlock_excl(&mount_lock);
75870+ write_seqlock(&rename_lock);
75871+
75872+ while (tmpd != mnt->mnt_root) {
75873+ atomic_inc(&tmpd->chroot_refcnt);
75874+ tmpd = tmpd->d_parent;
75875+ }
75876+ atomic_inc(&tmpd->chroot_refcnt);
75877+
75878+ write_sequnlock(&rename_lock);
75879+ read_sequnlock_excl(&mount_lock);
75880+#endif
75881+}
75882+
75883+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75884+{
75885+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75886+ struct dentry *tmpd = dentry;
75887+
75888+ read_seqlock_excl(&mount_lock);
75889+ write_seqlock(&rename_lock);
75890+
75891+ while (tmpd != mnt->mnt_root) {
75892+ atomic_dec(&tmpd->chroot_refcnt);
75893+ tmpd = tmpd->d_parent;
75894+ }
75895+ atomic_dec(&tmpd->chroot_refcnt);
75896+
75897+ write_sequnlock(&rename_lock);
75898+ read_sequnlock_excl(&mount_lock);
75899+#endif
75900+}
75901+
75902+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75903+static struct dentry *get_closest_chroot(struct dentry *dentry)
75904+{
75905+ write_seqlock(&rename_lock);
75906+ do {
75907+ if (atomic_read(&dentry->chroot_refcnt)) {
75908+ write_sequnlock(&rename_lock);
75909+ return dentry;
75910+ }
75911+ dentry = dentry->d_parent;
75912+ } while (!IS_ROOT(dentry));
75913+ write_sequnlock(&rename_lock);
75914+ return NULL;
75915+}
75916+#endif
75917+
75918+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75919+ struct dentry *newdentry, struct vfsmount *newmnt)
75920+{
75921+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75922+ struct dentry *chroot;
75923+
75924+ if (unlikely(!grsec_enable_chroot_rename))
75925+ return 0;
75926+
75927+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
75928+ return 0;
75929+
75930+ chroot = get_closest_chroot(olddentry);
75931+
75932+ if (chroot == NULL)
75933+ return 0;
75934+
75935+ if (is_subdir(newdentry, chroot))
75936+ return 0;
75937+
75938+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
75939+
75940+ return 1;
75941+#else
75942+ return 0;
75943+#endif
75944+}
75945+
75946+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
75947+{
75948+#ifdef CONFIG_GRKERNSEC
75949+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
75950+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
75951+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75952+ && gr_init_ran
75953+#endif
75954+ )
75955+ task->gr_is_chrooted = 1;
75956+ else {
75957+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75958+ if (task_pid_nr(task) == 1 && !gr_init_ran)
75959+ gr_init_ran = 1;
75960+#endif
75961+ task->gr_is_chrooted = 0;
75962+ }
75963+
75964+ task->gr_chroot_dentry = path->dentry;
75965+#endif
75966+ return;
75967+}
75968+
75969+void gr_clear_chroot_entries(struct task_struct *task)
75970+{
75971+#ifdef CONFIG_GRKERNSEC
75972+ task->gr_is_chrooted = 0;
75973+ task->gr_chroot_dentry = NULL;
75974+#endif
75975+ return;
75976+}
75977+
75978+int
75979+gr_handle_chroot_unix(const pid_t pid)
75980+{
75981+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75982+ struct task_struct *p;
75983+
75984+ if (unlikely(!grsec_enable_chroot_unix))
75985+ return 1;
75986+
75987+ if (likely(!proc_is_chrooted(current)))
75988+ return 1;
75989+
75990+ rcu_read_lock();
75991+ read_lock(&tasklist_lock);
75992+ p = find_task_by_vpid_unrestricted(pid);
75993+ if (unlikely(p && !have_same_root(current, p))) {
75994+ read_unlock(&tasklist_lock);
75995+ rcu_read_unlock();
75996+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
75997+ return 0;
75998+ }
75999+ read_unlock(&tasklist_lock);
76000+ rcu_read_unlock();
76001+#endif
76002+ return 1;
76003+}
76004+
76005+int
76006+gr_handle_chroot_nice(void)
76007+{
76008+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76009+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76010+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76011+ return -EPERM;
76012+ }
76013+#endif
76014+ return 0;
76015+}
76016+
76017+int
76018+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76019+{
76020+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76021+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76022+ && proc_is_chrooted(current)) {
76023+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76024+ return -EACCES;
76025+ }
76026+#endif
76027+ return 0;
76028+}
76029+
76030+int
76031+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76032+{
76033+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76034+ struct task_struct *p;
76035+ int ret = 0;
76036+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76037+ return ret;
76038+
76039+ read_lock(&tasklist_lock);
76040+ do_each_pid_task(pid, type, p) {
76041+ if (!have_same_root(current, p)) {
76042+ ret = 1;
76043+ goto out;
76044+ }
76045+ } while_each_pid_task(pid, type, p);
76046+out:
76047+ read_unlock(&tasklist_lock);
76048+ return ret;
76049+#endif
76050+ return 0;
76051+}
76052+
76053+int
76054+gr_pid_is_chrooted(struct task_struct *p)
76055+{
76056+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76057+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76058+ return 0;
76059+
76060+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76061+ !have_same_root(current, p)) {
76062+ return 1;
76063+ }
76064+#endif
76065+ return 0;
76066+}
76067+
76068+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76069+
76070+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76071+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76072+{
76073+ struct path path, currentroot;
76074+ int ret = 0;
76075+
76076+ path.dentry = (struct dentry *)u_dentry;
76077+ path.mnt = (struct vfsmount *)u_mnt;
76078+ get_fs_root(current->fs, &currentroot);
76079+ if (path_is_under(&path, &currentroot))
76080+ ret = 1;
76081+ path_put(&currentroot);
76082+
76083+ return ret;
76084+}
76085+#endif
76086+
76087+int
76088+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76089+{
76090+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76091+ if (!grsec_enable_chroot_fchdir)
76092+ return 1;
76093+
76094+ if (!proc_is_chrooted(current))
76095+ return 1;
76096+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76097+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76098+ return 0;
76099+ }
76100+#endif
76101+ return 1;
76102+}
76103+
76104+int
76105+gr_chroot_fhandle(void)
76106+{
76107+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76108+ if (!grsec_enable_chroot_fchdir)
76109+ return 1;
76110+
76111+ if (!proc_is_chrooted(current))
76112+ return 1;
76113+ else {
76114+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76115+ return 0;
76116+ }
76117+#endif
76118+ return 1;
76119+}
76120+
76121+int
76122+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76123+ const u64 shm_createtime)
76124+{
76125+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76126+ struct task_struct *p;
76127+
76128+ if (unlikely(!grsec_enable_chroot_shmat))
76129+ return 1;
76130+
76131+ if (likely(!proc_is_chrooted(current)))
76132+ return 1;
76133+
76134+ rcu_read_lock();
76135+ read_lock(&tasklist_lock);
76136+
76137+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76138+ if (time_before_eq64(p->start_time, shm_createtime)) {
76139+ if (have_same_root(current, p)) {
76140+ goto allow;
76141+ } else {
76142+ read_unlock(&tasklist_lock);
76143+ rcu_read_unlock();
76144+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76145+ return 0;
76146+ }
76147+ }
76148+ /* creator exited, pid reuse, fall through to next check */
76149+ }
76150+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76151+ if (unlikely(!have_same_root(current, p))) {
76152+ read_unlock(&tasklist_lock);
76153+ rcu_read_unlock();
76154+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76155+ return 0;
76156+ }
76157+ }
76158+
76159+allow:
76160+ read_unlock(&tasklist_lock);
76161+ rcu_read_unlock();
76162+#endif
76163+ return 1;
76164+}
76165+
76166+void
76167+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76168+{
76169+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76170+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76171+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76172+#endif
76173+ return;
76174+}
76175+
76176+int
76177+gr_handle_chroot_mknod(const struct dentry *dentry,
76178+ const struct vfsmount *mnt, const int mode)
76179+{
76180+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76181+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76182+ proc_is_chrooted(current)) {
76183+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76184+ return -EPERM;
76185+ }
76186+#endif
76187+ return 0;
76188+}
76189+
76190+int
76191+gr_handle_chroot_mount(const struct dentry *dentry,
76192+ const struct vfsmount *mnt, const char *dev_name)
76193+{
76194+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76195+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76196+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76197+ return -EPERM;
76198+ }
76199+#endif
76200+ return 0;
76201+}
76202+
76203+int
76204+gr_handle_chroot_pivot(void)
76205+{
76206+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76207+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76208+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76209+ return -EPERM;
76210+ }
76211+#endif
76212+ return 0;
76213+}
76214+
76215+int
76216+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76217+{
76218+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76219+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76220+ !gr_is_outside_chroot(dentry, mnt)) {
76221+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76222+ return -EPERM;
76223+ }
76224+#endif
76225+ return 0;
76226+}
76227+
76228+extern const char *captab_log[];
76229+extern int captab_log_entries;
76230+
76231+int
76232+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76233+{
76234+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76235+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76236+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76237+ if (cap_raised(chroot_caps, cap)) {
76238+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76239+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76240+ }
76241+ return 0;
76242+ }
76243+ }
76244+#endif
76245+ return 1;
76246+}
76247+
76248+int
76249+gr_chroot_is_capable(const int cap)
76250+{
76251+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76252+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76253+#endif
76254+ return 1;
76255+}
76256+
76257+int
76258+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76259+{
76260+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76261+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76262+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76263+ if (cap_raised(chroot_caps, cap)) {
76264+ return 0;
76265+ }
76266+ }
76267+#endif
76268+ return 1;
76269+}
76270+
76271+int
76272+gr_chroot_is_capable_nolog(const int cap)
76273+{
76274+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76275+ return gr_task_chroot_is_capable_nolog(current, cap);
76276+#endif
76277+ return 1;
76278+}
76279+
76280+int
76281+gr_handle_chroot_sysctl(const int op)
76282+{
76283+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76284+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76285+ proc_is_chrooted(current))
76286+ return -EACCES;
76287+#endif
76288+ return 0;
76289+}
76290+
76291+void
76292+gr_handle_chroot_chdir(const struct path *path)
76293+{
76294+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76295+ if (grsec_enable_chroot_chdir)
76296+ set_fs_pwd(current->fs, path);
76297+#endif
76298+ return;
76299+}
76300+
76301+int
76302+gr_handle_chroot_chmod(const struct dentry *dentry,
76303+ const struct vfsmount *mnt, const int mode)
76304+{
76305+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76306+ /* allow chmod +s on directories, but not files */
76307+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76308+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76309+ proc_is_chrooted(current)) {
76310+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76311+ return -EPERM;
76312+ }
76313+#endif
76314+ return 0;
76315+}
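
For illustration only: gr_task_chroot_is_capable() above consults a fixed deny-mask (GR_CHROOT_CAPS, a kernel_cap_t in the patch) and refuses any raised capability to chrooted tasks. A minimal sketch of that filter using a plain 64-bit mask; the mask contents here are a small assumed subset, not the patch's full list.

/* illustrative sketch, not the kernel code */
#include <stdio.h>

#define CAP_SYS_MODULE 16
#define CAP_SYS_RAWIO  17
#define CAP_SYS_ADMIN  21

static const unsigned long long chroot_denied_caps =
	(1ULL << CAP_SYS_MODULE) | (1ULL << CAP_SYS_RAWIO) |
	(1ULL << CAP_SYS_ADMIN);

static int chroot_is_capable(int cap, int in_chroot)
{
	if (in_chroot && (chroot_denied_caps & (1ULL << cap)))
		return 0;   /* capability is in the deny mask: refuse */
	return 1;
}

int main(void)
{
	printf("CAP_SYS_ADMIN in chroot: %d\n", chroot_is_capable(CAP_SYS_ADMIN, 1));
	printf("CAP_SYS_ADMIN outside:   %d\n", chroot_is_capable(CAP_SYS_ADMIN, 0));
	return 0;
}
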
76316diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76317new file mode 100644
76318index 0000000..946f750
76319--- /dev/null
76320+++ b/grsecurity/grsec_disabled.c
76321@@ -0,0 +1,445 @@
76322+#include <linux/kernel.h>
76323+#include <linux/module.h>
76324+#include <linux/sched.h>
76325+#include <linux/file.h>
76326+#include <linux/fs.h>
76327+#include <linux/kdev_t.h>
76328+#include <linux/net.h>
76329+#include <linux/in.h>
76330+#include <linux/ip.h>
76331+#include <linux/skbuff.h>
76332+#include <linux/sysctl.h>
76333+
76334+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76335+void
76336+pax_set_initial_flags(struct linux_binprm *bprm)
76337+{
76338+ return;
76339+}
76340+#endif
76341+
76342+#ifdef CONFIG_SYSCTL
76343+__u32
76344+gr_handle_sysctl(const struct ctl_table * table, const int op)
76345+{
76346+ return 0;
76347+}
76348+#endif
76349+
76350+#ifdef CONFIG_TASKSTATS
76351+int gr_is_taskstats_denied(int pid)
76352+{
76353+ return 0;
76354+}
76355+#endif
76356+
76357+int
76358+gr_acl_is_enabled(void)
76359+{
76360+ return 0;
76361+}
76362+
76363+int
76364+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76365+{
76366+ return 0;
76367+}
76368+
76369+void
76370+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76371+{
76372+ return;
76373+}
76374+
76375+int
76376+gr_handle_rawio(const struct inode *inode)
76377+{
76378+ return 0;
76379+}
76380+
76381+void
76382+gr_acl_handle_psacct(struct task_struct *task, const long code)
76383+{
76384+ return;
76385+}
76386+
76387+int
76388+gr_handle_ptrace(struct task_struct *task, const long request)
76389+{
76390+ return 0;
76391+}
76392+
76393+int
76394+gr_handle_proc_ptrace(struct task_struct *task)
76395+{
76396+ return 0;
76397+}
76398+
76399+int
76400+gr_set_acls(const int type)
76401+{
76402+ return 0;
76403+}
76404+
76405+int
76406+gr_check_hidden_task(const struct task_struct *tsk)
76407+{
76408+ return 0;
76409+}
76410+
76411+int
76412+gr_check_protected_task(const struct task_struct *task)
76413+{
76414+ return 0;
76415+}
76416+
76417+int
76418+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76419+{
76420+ return 0;
76421+}
76422+
76423+void
76424+gr_copy_label(struct task_struct *tsk)
76425+{
76426+ return;
76427+}
76428+
76429+void
76430+gr_set_pax_flags(struct task_struct *task)
76431+{
76432+ return;
76433+}
76434+
76435+int
76436+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76437+ const int unsafe_share)
76438+{
76439+ return 0;
76440+}
76441+
76442+void
76443+gr_handle_delete(const u64 ino, const dev_t dev)
76444+{
76445+ return;
76446+}
76447+
76448+void
76449+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76450+{
76451+ return;
76452+}
76453+
76454+void
76455+gr_handle_crash(struct task_struct *task, const int sig)
76456+{
76457+ return;
76458+}
76459+
76460+int
76461+gr_check_crash_exec(const struct file *filp)
76462+{
76463+ return 0;
76464+}
76465+
76466+int
76467+gr_check_crash_uid(const kuid_t uid)
76468+{
76469+ return 0;
76470+}
76471+
76472+void
76473+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76474+ struct dentry *old_dentry,
76475+ struct dentry *new_dentry,
76476+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76477+{
76478+ return;
76479+}
76480+
76481+int
76482+gr_search_socket(const int family, const int type, const int protocol)
76483+{
76484+ return 1;
76485+}
76486+
76487+int
76488+gr_search_connectbind(const int mode, const struct socket *sock,
76489+ const struct sockaddr_in *addr)
76490+{
76491+ return 0;
76492+}
76493+
76494+void
76495+gr_handle_alertkill(struct task_struct *task)
76496+{
76497+ return;
76498+}
76499+
76500+__u32
76501+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76502+{
76503+ return 1;
76504+}
76505+
76506+__u32
76507+gr_acl_handle_hidden_file(const struct dentry * dentry,
76508+ const struct vfsmount * mnt)
76509+{
76510+ return 1;
76511+}
76512+
76513+__u32
76514+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76515+ int acc_mode)
76516+{
76517+ return 1;
76518+}
76519+
76520+__u32
76521+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76522+{
76523+ return 1;
76524+}
76525+
76526+__u32
76527+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76528+{
76529+ return 1;
76530+}
76531+
76532+int
76533+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76534+ unsigned int *vm_flags)
76535+{
76536+ return 1;
76537+}
76538+
76539+__u32
76540+gr_acl_handle_truncate(const struct dentry * dentry,
76541+ const struct vfsmount * mnt)
76542+{
76543+ return 1;
76544+}
76545+
76546+__u32
76547+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76548+{
76549+ return 1;
76550+}
76551+
76552+__u32
76553+gr_acl_handle_access(const struct dentry * dentry,
76554+ const struct vfsmount * mnt, const int fmode)
76555+{
76556+ return 1;
76557+}
76558+
76559+__u32
76560+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76561+ umode_t *mode)
76562+{
76563+ return 1;
76564+}
76565+
76566+__u32
76567+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76568+{
76569+ return 1;
76570+}
76571+
76572+__u32
76573+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76574+{
76575+ return 1;
76576+}
76577+
76578+__u32
76579+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76580+{
76581+ return 1;
76582+}
76583+
76584+void
76585+grsecurity_init(void)
76586+{
76587+ return;
76588+}
76589+
76590+umode_t gr_acl_umask(void)
76591+{
76592+ return 0;
76593+}
76594+
76595+__u32
76596+gr_acl_handle_mknod(const struct dentry * new_dentry,
76597+ const struct dentry * parent_dentry,
76598+ const struct vfsmount * parent_mnt,
76599+ const int mode)
76600+{
76601+ return 1;
76602+}
76603+
76604+__u32
76605+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76606+ const struct dentry * parent_dentry,
76607+ const struct vfsmount * parent_mnt)
76608+{
76609+ return 1;
76610+}
76611+
76612+__u32
76613+gr_acl_handle_symlink(const struct dentry * new_dentry,
76614+ const struct dentry * parent_dentry,
76615+ const struct vfsmount * parent_mnt, const struct filename *from)
76616+{
76617+ return 1;
76618+}
76619+
76620+__u32
76621+gr_acl_handle_link(const struct dentry * new_dentry,
76622+ const struct dentry * parent_dentry,
76623+ const struct vfsmount * parent_mnt,
76624+ const struct dentry * old_dentry,
76625+ const struct vfsmount * old_mnt, const struct filename *to)
76626+{
76627+ return 1;
76628+}
76629+
76630+int
76631+gr_acl_handle_rename(const struct dentry *new_dentry,
76632+ const struct dentry *parent_dentry,
76633+ const struct vfsmount *parent_mnt,
76634+ const struct dentry *old_dentry,
76635+ const struct inode *old_parent_inode,
76636+ const struct vfsmount *old_mnt, const struct filename *newname,
76637+ unsigned int flags)
76638+{
76639+ return 0;
76640+}
76641+
76642+int
76643+gr_acl_handle_filldir(const struct file *file, const char *name,
76644+ const int namelen, const u64 ino)
76645+{
76646+ return 1;
76647+}
76648+
76649+int
76650+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76651+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76652+{
76653+ return 1;
76654+}
76655+
76656+int
76657+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76658+{
76659+ return 0;
76660+}
76661+
76662+int
76663+gr_search_accept(const struct socket *sock)
76664+{
76665+ return 0;
76666+}
76667+
76668+int
76669+gr_search_listen(const struct socket *sock)
76670+{
76671+ return 0;
76672+}
76673+
76674+int
76675+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76676+{
76677+ return 0;
76678+}
76679+
76680+__u32
76681+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76682+{
76683+ return 1;
76684+}
76685+
76686+__u32
76687+gr_acl_handle_creat(const struct dentry * dentry,
76688+ const struct dentry * p_dentry,
76689+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76690+ const int imode)
76691+{
76692+ return 1;
76693+}
76694+
76695+void
76696+gr_acl_handle_exit(void)
76697+{
76698+ return;
76699+}
76700+
76701+int
76702+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76703+{
76704+ return 1;
76705+}
76706+
76707+void
76708+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76709+{
76710+ return;
76711+}
76712+
76713+int
76714+gr_acl_handle_procpidmem(const struct task_struct *task)
76715+{
76716+ return 0;
76717+}
76718+
76719+int
76720+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76721+{
76722+ return 0;
76723+}
76724+
76725+int
76726+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76727+{
76728+ return 0;
76729+}
76730+
76731+int
76732+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76733+{
76734+ return 0;
76735+}
76736+
76737+int
76738+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76739+{
76740+ return 0;
76741+}
76742+
76743+int gr_acl_enable_at_secure(void)
76744+{
76745+ return 0;
76746+}
76747+
76748+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76749+{
76750+ return dentry->d_sb->s_dev;
76751+}
76752+
76753+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76754+{
76755+ return dentry->d_inode->i_ino;
76756+}
76757+
76758+void gr_put_exec_file(struct task_struct *task)
76759+{
76760+ return;
76761+}
76762+
76763+#ifdef CONFIG_SECURITY
76764+EXPORT_SYMBOL_GPL(gr_check_user_change);
76765+EXPORT_SYMBOL_GPL(gr_check_group_change);
76766+#endif
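
For illustration only: grsec_disabled.c above is the "feature off" half of a common kernel pattern -- every hook keeps its real signature but degenerates to a permissive constant, so call sites never need an #ifdef CONFIG_GRKERNSEC. A compilable miniature of the pattern, with illustrative names:

/* illustrative sketch, not the kernel code */
#include <stdio.h>

#ifdef CONFIG_MYFEATURE
/* the real policy check would live in another file */
extern int my_acl_handle_open(const char *path);
#else
/* feature compiled out: always allow */
int my_acl_handle_open(const char *path) { (void)path; return 1; }
#endif

int main(void)
{
	printf("open allowed: %d\n", my_acl_handle_open("/etc/passwd"));
	return 0;
}
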
76767diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76768new file mode 100644
76769index 0000000..fb7531e
76770--- /dev/null
76771+++ b/grsecurity/grsec_exec.c
76772@@ -0,0 +1,189 @@
76773+#include <linux/kernel.h>
76774+#include <linux/sched.h>
76775+#include <linux/file.h>
76776+#include <linux/binfmts.h>
76777+#include <linux/fs.h>
76778+#include <linux/types.h>
76779+#include <linux/grdefs.h>
76780+#include <linux/grsecurity.h>
76781+#include <linux/grinternal.h>
76782+#include <linux/capability.h>
76783+#include <linux/module.h>
76784+#include <linux/compat.h>
76785+
76786+#include <asm/uaccess.h>
76787+
76788+#ifdef CONFIG_GRKERNSEC_EXECLOG
76789+static char gr_exec_arg_buf[132];
76790+static DEFINE_MUTEX(gr_exec_arg_mutex);
76791+#endif
76792+
76793+struct user_arg_ptr {
76794+#ifdef CONFIG_COMPAT
76795+ bool is_compat;
76796+#endif
76797+ union {
76798+ const char __user *const __user *native;
76799+#ifdef CONFIG_COMPAT
76800+ const compat_uptr_t __user *compat;
76801+#endif
76802+ } ptr;
76803+};
76804+
76805+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76806+
76807+void
76808+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76809+{
76810+#ifdef CONFIG_GRKERNSEC_EXECLOG
76811+ char *grarg = gr_exec_arg_buf;
76812+ unsigned int i, x, execlen = 0;
76813+ char c;
76814+
76815+ if (!((grsec_enable_execlog && grsec_enable_group &&
76816+ in_group_p(grsec_audit_gid))
76817+ || (grsec_enable_execlog && !grsec_enable_group)))
76818+ return;
76819+
76820+ mutex_lock(&gr_exec_arg_mutex);
76821+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76822+
76823+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76824+ const char __user *p;
76825+ unsigned int len;
76826+
76827+ p = get_user_arg_ptr(argv, i);
76828+ if (IS_ERR(p))
76829+ goto log;
76830+
76831+ len = strnlen_user(p, 128 - execlen);
76832+ if (len > 128 - execlen)
76833+ len = 128 - execlen;
76834+ else if (len > 0)
76835+ len--;
76836+ if (copy_from_user(grarg + execlen, p, len))
76837+ goto log;
76838+
76839+ /* rewrite unprintable characters */
76840+ for (x = 0; x < len; x++) {
76841+ c = *(grarg + execlen + x);
76842+ if (c < 32 || c > 126)
76843+ *(grarg + execlen + x) = ' ';
76844+ }
76845+
76846+ execlen += len;
76847+ *(grarg + execlen) = ' ';
76848+ *(grarg + execlen + 1) = '\0';
76849+ execlen++;
76850+ }
76851+
76852+ log:
76853+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76854+ bprm->file->f_path.mnt, grarg);
76855+ mutex_unlock(&gr_exec_arg_mutex);
76856+#endif
76857+ return;
76858+}
76859+
76860+#ifdef CONFIG_GRKERNSEC
76861+extern int gr_acl_is_capable(const int cap);
76862+extern int gr_acl_is_capable_nolog(const int cap);
76863+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76864+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76865+extern int gr_chroot_is_capable(const int cap);
76866+extern int gr_chroot_is_capable_nolog(const int cap);
76867+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76868+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76869+#endif
76870+
76871+const char *captab_log[] = {
76872+ "CAP_CHOWN",
76873+ "CAP_DAC_OVERRIDE",
76874+ "CAP_DAC_READ_SEARCH",
76875+ "CAP_FOWNER",
76876+ "CAP_FSETID",
76877+ "CAP_KILL",
76878+ "CAP_SETGID",
76879+ "CAP_SETUID",
76880+ "CAP_SETPCAP",
76881+ "CAP_LINUX_IMMUTABLE",
76882+ "CAP_NET_BIND_SERVICE",
76883+ "CAP_NET_BROADCAST",
76884+ "CAP_NET_ADMIN",
76885+ "CAP_NET_RAW",
76886+ "CAP_IPC_LOCK",
76887+ "CAP_IPC_OWNER",
76888+ "CAP_SYS_MODULE",
76889+ "CAP_SYS_RAWIO",
76890+ "CAP_SYS_CHROOT",
76891+ "CAP_SYS_PTRACE",
76892+ "CAP_SYS_PACCT",
76893+ "CAP_SYS_ADMIN",
76894+ "CAP_SYS_BOOT",
76895+ "CAP_SYS_NICE",
76896+ "CAP_SYS_RESOURCE",
76897+ "CAP_SYS_TIME",
76898+ "CAP_SYS_TTY_CONFIG",
76899+ "CAP_MKNOD",
76900+ "CAP_LEASE",
76901+ "CAP_AUDIT_WRITE",
76902+ "CAP_AUDIT_CONTROL",
76903+ "CAP_SETFCAP",
76904+ "CAP_MAC_OVERRIDE",
76905+ "CAP_MAC_ADMIN",
76906+ "CAP_SYSLOG",
76907+ "CAP_WAKE_ALARM",
76908+ "CAP_BLOCK_SUSPEND",
76909+ "CAP_AUDIT_READ"
76910+};
76911+
76912+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76913+
76914+int gr_is_capable(const int cap)
76915+{
76916+#ifdef CONFIG_GRKERNSEC
76917+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76918+ return 1;
76919+ return 0;
76920+#else
76921+ return 1;
76922+#endif
76923+}
76924+
76925+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76926+{
76927+#ifdef CONFIG_GRKERNSEC
76928+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
76929+ return 1;
76930+ return 0;
76931+#else
76932+ return 1;
76933+#endif
76934+}
76935+
76936+int gr_is_capable_nolog(const int cap)
76937+{
76938+#ifdef CONFIG_GRKERNSEC
76939+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
76940+ return 1;
76941+ return 0;
76942+#else
76943+ return 1;
76944+#endif
76945+}
76946+
76947+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
76948+{
76949+#ifdef CONFIG_GRKERNSEC
76950+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
76951+ return 1;
76952+ return 0;
76953+#else
76954+ return 1;
76955+#endif
76956+}
76957+
76958+EXPORT_SYMBOL_GPL(gr_is_capable);
76959+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
76960+EXPORT_SYMBOL_GPL(gr_task_is_capable);
76961+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
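
For illustration only: gr_handle_exec_args() above concatenates argv into a fixed buffer, capping the total at 128 bytes and rewriting unprintable bytes to spaces before logging. A userspace sketch of that loop (it reads from ordinary pointers rather than strnlen_user()/copy_from_user(), and simplifies the NUL accounting):

/* illustrative sketch, not the kernel code */
#include <stdio.h>
#include <string.h>

#define LOGBUF 132

static void log_args(char *const argv[], int argc)
{
	char buf[LOGBUF] = { 0 };
	unsigned int execlen = 0;
	int i;

	for (i = 0; i < argc && execlen < 128; i++) {
		unsigned int len = strnlen(argv[i], 128 - execlen);
		unsigned int x;

		memcpy(buf + execlen, argv[i], len);

		for (x = 0; x < len; x++)          /* rewrite unprintables */
			if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
				buf[execlen + x] = ' ';

		execlen += len;
		buf[execlen++] = ' ';              /* argument separator */
	}
	printf("exec: %s\n", buf);
}

int main(void)
{
	char *argv[] = { "ls", "-la", "/tmp\x01evil" };
	log_args(argv, 3);
	return 0;
}
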
76962diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
76963new file mode 100644
76964index 0000000..06cc6ea
76965--- /dev/null
76966+++ b/grsecurity/grsec_fifo.c
76967@@ -0,0 +1,24 @@
76968+#include <linux/kernel.h>
76969+#include <linux/sched.h>
76970+#include <linux/fs.h>
76971+#include <linux/file.h>
76972+#include <linux/grinternal.h>
76973+
76974+int
76975+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
76976+ const struct dentry *dir, const int flag, const int acc_mode)
76977+{
76978+#ifdef CONFIG_GRKERNSEC_FIFO
76979+ const struct cred *cred = current_cred();
76980+
76981+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
76982+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
76983+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
76984+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
76985+ if (!inode_permission(dentry->d_inode, acc_mode))
76986+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
76987+ return -EACCES;
76988+ }
76989+#endif
76990+ return 0;
76991+}
76992diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
76993new file mode 100644
76994index 0000000..8ca18bf
76995--- /dev/null
76996+++ b/grsecurity/grsec_fork.c
76997@@ -0,0 +1,23 @@
76998+#include <linux/kernel.h>
76999+#include <linux/sched.h>
77000+#include <linux/grsecurity.h>
77001+#include <linux/grinternal.h>
77002+#include <linux/errno.h>
77003+
77004+void
77005+gr_log_forkfail(const int retval)
77006+{
77007+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77008+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77009+ switch (retval) {
77010+ case -EAGAIN:
77011+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77012+ break;
77013+ case -ENOMEM:
77014+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77015+ break;
77016+ }
77017+ }
77018+#endif
77019+ return;
77020+}
77021diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77022new file mode 100644
77023index 0000000..4ed9e7d
77024--- /dev/null
77025+++ b/grsecurity/grsec_init.c
77026@@ -0,0 +1,290 @@
77027+#include <linux/kernel.h>
77028+#include <linux/sched.h>
77029+#include <linux/mm.h>
77030+#include <linux/gracl.h>
77031+#include <linux/slab.h>
77032+#include <linux/vmalloc.h>
77033+#include <linux/percpu.h>
77034+#include <linux/module.h>
77035+
77036+int grsec_enable_ptrace_readexec;
77037+int grsec_enable_setxid;
77038+int grsec_enable_symlinkown;
77039+kgid_t grsec_symlinkown_gid;
77040+int grsec_enable_brute;
77041+int grsec_enable_link;
77042+int grsec_enable_dmesg;
77043+int grsec_enable_harden_ptrace;
77044+int grsec_enable_harden_ipc;
77045+int grsec_enable_fifo;
77046+int grsec_enable_execlog;
77047+int grsec_enable_signal;
77048+int grsec_enable_forkfail;
77049+int grsec_enable_audit_ptrace;
77050+int grsec_enable_time;
77051+int grsec_enable_group;
77052+kgid_t grsec_audit_gid;
77053+int grsec_enable_chdir;
77054+int grsec_enable_mount;
77055+int grsec_enable_rofs;
77056+int grsec_deny_new_usb;
77057+int grsec_enable_chroot_findtask;
77058+int grsec_enable_chroot_mount;
77059+int grsec_enable_chroot_shmat;
77060+int grsec_enable_chroot_fchdir;
77061+int grsec_enable_chroot_double;
77062+int grsec_enable_chroot_pivot;
77063+int grsec_enable_chroot_chdir;
77064+int grsec_enable_chroot_chmod;
77065+int grsec_enable_chroot_mknod;
77066+int grsec_enable_chroot_nice;
77067+int grsec_enable_chroot_execlog;
77068+int grsec_enable_chroot_caps;
77069+int grsec_enable_chroot_rename;
77070+int grsec_enable_chroot_sysctl;
77071+int grsec_enable_chroot_unix;
77072+int grsec_enable_tpe;
77073+kgid_t grsec_tpe_gid;
77074+int grsec_enable_blackhole;
77075+#ifdef CONFIG_IPV6_MODULE
77076+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77077+#endif
77078+int grsec_lastack_retries;
77079+int grsec_enable_tpe_all;
77080+int grsec_enable_tpe_invert;
77081+int grsec_enable_socket_all;
77082+kgid_t grsec_socket_all_gid;
77083+int grsec_enable_socket_client;
77084+kgid_t grsec_socket_client_gid;
77085+int grsec_enable_socket_server;
77086+kgid_t grsec_socket_server_gid;
77087+int grsec_resource_logging;
77088+int grsec_disable_privio;
77089+int grsec_enable_log_rwxmaps;
77090+int grsec_lock;
77091+
77092+DEFINE_SPINLOCK(grsec_alert_lock);
77093+unsigned long grsec_alert_wtime = 0;
77094+unsigned long grsec_alert_fyet = 0;
77095+
77096+DEFINE_SPINLOCK(grsec_audit_lock);
77097+
77098+DEFINE_RWLOCK(grsec_exec_file_lock);
77099+
77100+char *gr_shared_page[4];
77101+
77102+char *gr_alert_log_fmt;
77103+char *gr_audit_log_fmt;
77104+char *gr_alert_log_buf;
77105+char *gr_audit_log_buf;
77106+
77107+extern struct gr_arg *gr_usermode;
77108+extern unsigned char *gr_system_salt;
77109+extern unsigned char *gr_system_sum;
77110+
77111+void __init
77112+grsecurity_init(void)
77113+{
77114+ int j;
77115+ /* create the per-cpu shared pages */
77116+
77117+#ifdef CONFIG_X86
77118+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77119+#endif
77120+
77121+ for (j = 0; j < 4; j++) {
77122+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77123+ if (gr_shared_page[j] == NULL) {
77124+ panic("Unable to allocate grsecurity shared page");
77125+ return;
77126+ }
77127+ }
77128+
77129+ /* allocate log buffers */
77130+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77131+ if (!gr_alert_log_fmt) {
77132+ panic("Unable to allocate grsecurity alert log format buffer");
77133+ return;
77134+ }
77135+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77136+ if (!gr_audit_log_fmt) {
77137+ panic("Unable to allocate grsecurity audit log format buffer");
77138+ return;
77139+ }
77140+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77141+ if (!gr_alert_log_buf) {
77142+ panic("Unable to allocate grsecurity alert log buffer");
77143+ return;
77144+ }
77145+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77146+ if (!gr_audit_log_buf) {
77147+ panic("Unable to allocate grsecurity audit log buffer");
77148+ return;
77149+ }
77150+
77151+ /* allocate memory for authentication structure */
77152+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77153+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77154+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77155+
77156+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77157+ panic("Unable to allocate grsecurity authentication structure");
77158+ return;
77159+ }
77160+
77161+#ifdef CONFIG_GRKERNSEC_IO
77162+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77163+ grsec_disable_privio = 1;
77164+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77165+ grsec_disable_privio = 1;
77166+#else
77167+ grsec_disable_privio = 0;
77168+#endif
77169+#endif
77170+
77171+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77172+ /* for backward compatibility, tpe_invert always defaults to on if
77173+ enabled in the kernel
77174+ */
77175+ grsec_enable_tpe_invert = 1;
77176+#endif
77177+
77178+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77179+#ifndef CONFIG_GRKERNSEC_SYSCTL
77180+ grsec_lock = 1;
77181+#endif
77182+
77183+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77184+ grsec_enable_log_rwxmaps = 1;
77185+#endif
77186+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77187+ grsec_enable_group = 1;
77188+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77189+#endif
77190+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77191+ grsec_enable_ptrace_readexec = 1;
77192+#endif
77193+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77194+ grsec_enable_chdir = 1;
77195+#endif
77196+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77197+ grsec_enable_harden_ptrace = 1;
77198+#endif
77199+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77200+ grsec_enable_harden_ipc = 1;
77201+#endif
77202+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77203+ grsec_enable_mount = 1;
77204+#endif
77205+#ifdef CONFIG_GRKERNSEC_LINK
77206+ grsec_enable_link = 1;
77207+#endif
77208+#ifdef CONFIG_GRKERNSEC_BRUTE
77209+ grsec_enable_brute = 1;
77210+#endif
77211+#ifdef CONFIG_GRKERNSEC_DMESG
77212+ grsec_enable_dmesg = 1;
77213+#endif
77214+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77215+ grsec_enable_blackhole = 1;
77216+ grsec_lastack_retries = 4;
77217+#endif
77218+#ifdef CONFIG_GRKERNSEC_FIFO
77219+ grsec_enable_fifo = 1;
77220+#endif
77221+#ifdef CONFIG_GRKERNSEC_EXECLOG
77222+ grsec_enable_execlog = 1;
77223+#endif
77224+#ifdef CONFIG_GRKERNSEC_SETXID
77225+ grsec_enable_setxid = 1;
77226+#endif
77227+#ifdef CONFIG_GRKERNSEC_SIGNAL
77228+ grsec_enable_signal = 1;
77229+#endif
77230+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77231+ grsec_enable_forkfail = 1;
77232+#endif
77233+#ifdef CONFIG_GRKERNSEC_TIME
77234+ grsec_enable_time = 1;
77235+#endif
77236+#ifdef CONFIG_GRKERNSEC_RESLOG
77237+ grsec_resource_logging = 1;
77238+#endif
77239+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77240+ grsec_enable_chroot_findtask = 1;
77241+#endif
77242+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77243+ grsec_enable_chroot_unix = 1;
77244+#endif
77245+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77246+ grsec_enable_chroot_mount = 1;
77247+#endif
77248+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77249+ grsec_enable_chroot_fchdir = 1;
77250+#endif
77251+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77252+ grsec_enable_chroot_shmat = 1;
77253+#endif
77254+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77255+ grsec_enable_audit_ptrace = 1;
77256+#endif
77257+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77258+ grsec_enable_chroot_double = 1;
77259+#endif
77260+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77261+ grsec_enable_chroot_pivot = 1;
77262+#endif
77263+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77264+ grsec_enable_chroot_chdir = 1;
77265+#endif
77266+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77267+ grsec_enable_chroot_chmod = 1;
77268+#endif
77269+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77270+ grsec_enable_chroot_mknod = 1;
77271+#endif
77272+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77273+ grsec_enable_chroot_nice = 1;
77274+#endif
77275+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77276+ grsec_enable_chroot_execlog = 1;
77277+#endif
77278+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77279+ grsec_enable_chroot_caps = 1;
77280+#endif
77281+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77282+ grsec_enable_chroot_rename = 1;
77283+#endif
77284+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77285+ grsec_enable_chroot_sysctl = 1;
77286+#endif
77287+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77288+ grsec_enable_symlinkown = 1;
77289+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77290+#endif
77291+#ifdef CONFIG_GRKERNSEC_TPE
77292+ grsec_enable_tpe = 1;
77293+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77294+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77295+ grsec_enable_tpe_all = 1;
77296+#endif
77297+#endif
77298+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77299+ grsec_enable_socket_all = 1;
77300+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77301+#endif
77302+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77303+ grsec_enable_socket_client = 1;
77304+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77305+#endif
77306+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77307+ grsec_enable_socket_server = 1;
77308+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77309+#endif
77310+#endif
77311+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77312+ grsec_deny_new_usb = 1;
77313+#endif
77314+
77315+ return;
77316+}
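
For illustration only: the long #ifdef ladder in grsecurity_init() above compiles Kconfig selections into plain runtime integers, so the sysctl layer can flip them later. A miniature of the pattern (compile with -DCONFIG_MYFEATURE_DEFAULT_ON to change the default; names are illustrative):

/* illustrative sketch, not the kernel code */
#include <stdio.h>

int feature_enabled;        /* runtime toggle, adjustable via sysctl */

static void feature_init(void)
{
#ifdef CONFIG_MYFEATURE_DEFAULT_ON
	feature_enabled = 1;    /* default chosen at build time */
#endif
}

int main(void)
{
	feature_init();
	printf("feature_enabled = %d\n", feature_enabled);
	return 0;
}
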
77317diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77318new file mode 100644
77319index 0000000..1773300
77320--- /dev/null
77321+++ b/grsecurity/grsec_ipc.c
77322@@ -0,0 +1,48 @@
77323+#include <linux/kernel.h>
77324+#include <linux/mm.h>
77325+#include <linux/sched.h>
77326+#include <linux/file.h>
77327+#include <linux/ipc.h>
77328+#include <linux/ipc_namespace.h>
77329+#include <linux/grsecurity.h>
77330+#include <linux/grinternal.h>
77331+
77332+int
77333+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77334+{
77335+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77336+ int write;
77337+ int orig_granted_mode;
77338+ kuid_t euid;
77339+ kgid_t egid;
77340+
77341+ if (!grsec_enable_harden_ipc)
77342+ return 1;
77343+
77344+ euid = current_euid();
77345+ egid = current_egid();
77346+
77347+ write = requested_mode & 00002;
77348+ orig_granted_mode = ipcp->mode;
77349+
77350+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77351+ orig_granted_mode >>= 6;
77352+ else {
77353+ /* if likely wrong permissions, lock to user */
77354+ if (orig_granted_mode & 0007)
77355+ orig_granted_mode = 0;
77356+		/* otherwise do an egid-only check */
77357+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77358+ orig_granted_mode >>= 3;
77359+ /* otherwise, no access */
77360+ else
77361+ orig_granted_mode = 0;
77362+ }
77363+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77364+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77365+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77366+ return 0;
77367+ }
77368+#endif
77369+ return 1;
77370+}
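
For illustration only: the heart of gr_ipc_permitted() above re-derives which permission triplet of the IPC object's mode actually applies to the caller -- owner bits if the caller owns the object, nothing if the object is world-accessible (treated as suspect), group bits otherwise. A sketch of that selection over plain mode/uid values, with illustrative names:

/* illustrative sketch, not the kernel code */
#include <stdio.h>

static int effective_triplet(unsigned int mode, int is_owner, int is_group)
{
	if (is_owner)
		return (mode >> 6) & 7;
	if (mode & 0007)        /* world-accessible object: lock to owner */
		return 0;
	if (is_group)
		return (mode >> 3) & 7;
	return 0;
}

int main(void)
{
	printf("owner on 0666: %o\n", effective_triplet(0666, 1, 0));
	printf("group on 0660: %o\n", effective_triplet(0660, 0, 1));
	printf("group on 0666: %o (locked, world bits set)\n",
	       effective_triplet(0666, 0, 1));
	return 0;
}
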
77371diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77372new file mode 100644
77373index 0000000..5e05e20
77374--- /dev/null
77375+++ b/grsecurity/grsec_link.c
77376@@ -0,0 +1,58 @@
77377+#include <linux/kernel.h>
77378+#include <linux/sched.h>
77379+#include <linux/fs.h>
77380+#include <linux/file.h>
77381+#include <linux/grinternal.h>
77382+
77383+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77384+{
77385+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77386+ const struct inode *link_inode = link->dentry->d_inode;
77387+
77388+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77389+ /* ignore root-owned links, e.g. /proc/self */
77390+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77391+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77392+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77393+ return 1;
77394+ }
77395+#endif
77396+ return 0;
77397+}
77398+
77399+int
77400+gr_handle_follow_link(const struct inode *parent,
77401+ const struct inode *inode,
77402+ const struct dentry *dentry, const struct vfsmount *mnt)
77403+{
77404+#ifdef CONFIG_GRKERNSEC_LINK
77405+ const struct cred *cred = current_cred();
77406+
77407+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77408+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77409+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77410+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77411+ return -EACCES;
77412+ }
77413+#endif
77414+ return 0;
77415+}
77416+
77417+int
77418+gr_handle_hardlink(const struct dentry *dentry,
77419+ const struct vfsmount *mnt,
77420+ struct inode *inode, const int mode, const struct filename *to)
77421+{
77422+#ifdef CONFIG_GRKERNSEC_LINK
77423+ const struct cred *cred = current_cred();
77424+
77425+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77426+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77427+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77428+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77429+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77430+ return -EPERM;
77431+ }
77432+#endif
77433+ return 0;
77434+}
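
For illustration only: gr_handle_follow_link() above implements the classic "protected symlinks" predicate -- deny following a link that sits in a sticky, world-writable directory when the link's owner differs from both the directory owner and the follower. A sketch of the predicate over plain mode/uid values:

/* illustrative sketch, not the kernel code */
#include <stdio.h>

#define MY_ISVTX 01000
#define MY_IWOTH 00002

static int deny_follow(unsigned int parent_mode, unsigned int parent_uid,
		       unsigned int link_uid, unsigned int fsuid)
{
	return (parent_mode & MY_ISVTX) &&   /* sticky directory (e.g. /tmp) */
	       (parent_mode & MY_IWOTH) &&   /* world-writable */
	       parent_uid != link_uid &&     /* link owner differs from dir owner */
	       fsuid != link_uid;            /* and from the follower */
}

int main(void)
{
	/* /tmp (root-owned, mode 1777), link owned by uid 1000, follower uid 0 */
	printf("deny: %d\n", deny_follow(01777, 0, 1000, 0));
	return 0;
}
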
77435diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77436new file mode 100644
77437index 0000000..dbe0a6b
77438--- /dev/null
77439+++ b/grsecurity/grsec_log.c
77440@@ -0,0 +1,341 @@
77441+#include <linux/kernel.h>
77442+#include <linux/sched.h>
77443+#include <linux/file.h>
77444+#include <linux/tty.h>
77445+#include <linux/fs.h>
77446+#include <linux/mm.h>
77447+#include <linux/grinternal.h>
77448+
77449+#ifdef CONFIG_TREE_PREEMPT_RCU
77450+#define DISABLE_PREEMPT() preempt_disable()
77451+#define ENABLE_PREEMPT() preempt_enable()
77452+#else
77453+#define DISABLE_PREEMPT()
77454+#define ENABLE_PREEMPT()
77455+#endif
77456+
77457+#define BEGIN_LOCKS(x) \
77458+ DISABLE_PREEMPT(); \
77459+ rcu_read_lock(); \
77460+ read_lock(&tasklist_lock); \
77461+ read_lock(&grsec_exec_file_lock); \
77462+ if (x != GR_DO_AUDIT) \
77463+ spin_lock(&grsec_alert_lock); \
77464+ else \
77465+ spin_lock(&grsec_audit_lock)
77466+
77467+#define END_LOCKS(x) \
77468+ if (x != GR_DO_AUDIT) \
77469+ spin_unlock(&grsec_alert_lock); \
77470+ else \
77471+ spin_unlock(&grsec_audit_lock); \
77472+ read_unlock(&grsec_exec_file_lock); \
77473+ read_unlock(&tasklist_lock); \
77474+ rcu_read_unlock(); \
77475+ ENABLE_PREEMPT(); \
77476+ if (x == GR_DONT_AUDIT) \
77477+ gr_handle_alertkill(current)
77478+
77479+enum {
77480+ FLOODING,
77481+ NO_FLOODING
77482+};
77483+
77484+extern char *gr_alert_log_fmt;
77485+extern char *gr_audit_log_fmt;
77486+extern char *gr_alert_log_buf;
77487+extern char *gr_audit_log_buf;
77488+
77489+static int gr_log_start(int audit)
77490+{
77491+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77492+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77493+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77494+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77495+ unsigned long curr_secs = get_seconds();
77496+
77497+ if (audit == GR_DO_AUDIT)
77498+ goto set_fmt;
77499+
77500+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77501+ grsec_alert_wtime = curr_secs;
77502+ grsec_alert_fyet = 0;
77503+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77504+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77505+ grsec_alert_fyet++;
77506+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77507+ grsec_alert_wtime = curr_secs;
77508+ grsec_alert_fyet++;
77509+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77510+ return FLOODING;
77511+ }
77512+ else return FLOODING;
77513+
77514+set_fmt:
77515+#endif
77516+ memset(buf, 0, PAGE_SIZE);
77517+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77518+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77519+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77520+ } else if (current->signal->curr_ip) {
77521+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77522+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77523+ } else if (gr_acl_is_enabled()) {
77524+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77525+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77526+ } else {
77527+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77528+ strcpy(buf, fmt);
77529+ }
77530+
77531+ return NO_FLOODING;
77532+}
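
For illustration only: the flood control in gr_log_start() above is a burst-per-window rate limiter -- a window timestamp plus a counter, with the suppression notice printed exactly once per suppressed window. A userspace sketch with illustrative names and constants (the patch takes both from Kconfig):

/* illustrative sketch, not the kernel code */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window */
#define FLOODBURST 6    /* messages allowed per window */

static time_t wtime;
static unsigned long fyet;

static int allow_log(void)
{
	time_t now = time(NULL);

	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;            /* new window */
		fyet = 0;
		return 1;
	}
	if (fyet < FLOODBURST) {
		fyet++;
		return 1;
	}
	if (fyet == FLOODBURST) {       /* announce suppression exactly once */
		wtime = now;
		fyet++;
		fprintf(stderr, "more alerts, logging disabled for %d seconds\n",
			FLOODTIME);
	}
	return 0;
}

int main(void)
{
	int i;
	for (i = 0; i < 10; i++)
		printf("msg %d: %s\n", i, allow_log() ? "logged" : "dropped");
	return 0;
}
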
77533+
77534+static void gr_log_middle(int audit, const char *msg, va_list ap)
77535+ __attribute__ ((format (printf, 2, 0)));
77536+
77537+static void gr_log_middle(int audit, const char *msg, va_list ap)
77538+{
77539+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77540+ unsigned int len = strlen(buf);
77541+
77542+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77543+
77544+ return;
77545+}
77546+
77547+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77548+ __attribute__ ((format (printf, 2, 3)));
77549+
77550+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77551+{
77552+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77553+ unsigned int len = strlen(buf);
77554+ va_list ap;
77555+
77556+ va_start(ap, msg);
77557+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77558+ va_end(ap);
77559+
77560+ return;
77561+}
77562+
77563+static void gr_log_end(int audit, int append_default)
77564+{
77565+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77566+ if (append_default) {
77567+ struct task_struct *task = current;
77568+ struct task_struct *parent = task->real_parent;
77569+ const struct cred *cred = __task_cred(task);
77570+ const struct cred *pcred = __task_cred(parent);
77571+ unsigned int len = strlen(buf);
77572+
77573+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77574+ }
77575+
77576+ printk("%s\n", buf);
77577+
77578+ return;
77579+}
77580+
77581+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77582+{
77583+ int logtype;
77584+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77585+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77586+ void *voidptr = NULL;
77587+ int num1 = 0, num2 = 0;
77588+ unsigned long ulong1 = 0, ulong2 = 0;
77589+ struct dentry *dentry = NULL;
77590+ struct vfsmount *mnt = NULL;
77591+ struct file *file = NULL;
77592+ struct task_struct *task = NULL;
77593+ struct vm_area_struct *vma = NULL;
77594+ const struct cred *cred, *pcred;
77595+ va_list ap;
77596+
77597+ BEGIN_LOCKS(audit);
77598+ logtype = gr_log_start(audit);
77599+ if (logtype == FLOODING) {
77600+ END_LOCKS(audit);
77601+ return;
77602+ }
77603+ va_start(ap, argtypes);
77604+ switch (argtypes) {
77605+ case GR_TTYSNIFF:
77606+ task = va_arg(ap, struct task_struct *);
77607+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77608+ break;
77609+ case GR_SYSCTL_HIDDEN:
77610+ str1 = va_arg(ap, char *);
77611+ gr_log_middle_varargs(audit, msg, result, str1);
77612+ break;
77613+ case GR_RBAC:
77614+ dentry = va_arg(ap, struct dentry *);
77615+ mnt = va_arg(ap, struct vfsmount *);
77616+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77617+ break;
77618+ case GR_RBAC_STR:
77619+ dentry = va_arg(ap, struct dentry *);
77620+ mnt = va_arg(ap, struct vfsmount *);
77621+ str1 = va_arg(ap, char *);
77622+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77623+ break;
77624+ case GR_STR_RBAC:
77625+ str1 = va_arg(ap, char *);
77626+ dentry = va_arg(ap, struct dentry *);
77627+ mnt = va_arg(ap, struct vfsmount *);
77628+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77629+ break;
77630+ case GR_RBAC_MODE2:
77631+ dentry = va_arg(ap, struct dentry *);
77632+ mnt = va_arg(ap, struct vfsmount *);
77633+ str1 = va_arg(ap, char *);
77634+ str2 = va_arg(ap, char *);
77635+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77636+ break;
77637+ case GR_RBAC_MODE3:
77638+ dentry = va_arg(ap, struct dentry *);
77639+ mnt = va_arg(ap, struct vfsmount *);
77640+ str1 = va_arg(ap, char *);
77641+ str2 = va_arg(ap, char *);
77642+ str3 = va_arg(ap, char *);
77643+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77644+ break;
77645+ case GR_FILENAME:
77646+ dentry = va_arg(ap, struct dentry *);
77647+ mnt = va_arg(ap, struct vfsmount *);
77648+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77649+ break;
77650+ case GR_STR_FILENAME:
77651+ str1 = va_arg(ap, char *);
77652+ dentry = va_arg(ap, struct dentry *);
77653+ mnt = va_arg(ap, struct vfsmount *);
77654+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77655+ break;
77656+ case GR_FILENAME_STR:
77657+ dentry = va_arg(ap, struct dentry *);
77658+ mnt = va_arg(ap, struct vfsmount *);
77659+ str1 = va_arg(ap, char *);
77660+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77661+ break;
77662+ case GR_FILENAME_TWO_INT:
77663+ dentry = va_arg(ap, struct dentry *);
77664+ mnt = va_arg(ap, struct vfsmount *);
77665+ num1 = va_arg(ap, int);
77666+ num2 = va_arg(ap, int);
77667+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77668+ break;
77669+ case GR_FILENAME_TWO_INT_STR:
77670+ dentry = va_arg(ap, struct dentry *);
77671+ mnt = va_arg(ap, struct vfsmount *);
77672+ num1 = va_arg(ap, int);
77673+ num2 = va_arg(ap, int);
77674+ str1 = va_arg(ap, char *);
77675+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77676+ break;
77677+ case GR_TEXTREL:
77678+ file = va_arg(ap, struct file *);
77679+ ulong1 = va_arg(ap, unsigned long);
77680+ ulong2 = va_arg(ap, unsigned long);
77681+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77682+ break;
77683+ case GR_PTRACE:
77684+ task = va_arg(ap, struct task_struct *);
77685+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77686+ break;
77687+ case GR_RESOURCE:
77688+ task = va_arg(ap, struct task_struct *);
77689+ cred = __task_cred(task);
77690+ pcred = __task_cred(task->real_parent);
77691+ ulong1 = va_arg(ap, unsigned long);
77692+ str1 = va_arg(ap, char *);
77693+ ulong2 = va_arg(ap, unsigned long);
77694+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77695+ break;
77696+ case GR_CAP:
77697+ task = va_arg(ap, struct task_struct *);
77698+ cred = __task_cred(task);
77699+ pcred = __task_cred(task->real_parent);
77700+ str1 = va_arg(ap, char *);
77701+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77702+ break;
77703+ case GR_SIG:
77704+ str1 = va_arg(ap, char *);
77705+ voidptr = va_arg(ap, void *);
77706+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77707+ break;
77708+ case GR_SIG2:
77709+ task = va_arg(ap, struct task_struct *);
77710+ cred = __task_cred(task);
77711+ pcred = __task_cred(task->real_parent);
77712+ num1 = va_arg(ap, int);
77713+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77714+ break;
77715+ case GR_CRASH1:
77716+ task = va_arg(ap, struct task_struct *);
77717+ cred = __task_cred(task);
77718+ pcred = __task_cred(task->real_parent);
77719+ ulong1 = va_arg(ap, unsigned long);
77720+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77721+ break;
77722+ case GR_CRASH2:
77723+ task = va_arg(ap, struct task_struct *);
77724+ cred = __task_cred(task);
77725+ pcred = __task_cred(task->real_parent);
77726+ ulong1 = va_arg(ap, unsigned long);
77727+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77728+ break;
77729+ case GR_RWXMAP:
77730+ file = va_arg(ap, struct file *);
77731+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77732+ break;
77733+ case GR_RWXMAPVMA:
77734+ vma = va_arg(ap, struct vm_area_struct *);
77735+ if (vma->vm_file)
77736+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77737+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77738+ str1 = "<stack>";
77739+ else if (vma->vm_start <= current->mm->brk &&
77740+ vma->vm_end >= current->mm->start_brk)
77741+ str1 = "<heap>";
77742+ else
77743+ str1 = "<anonymous mapping>";
77744+ gr_log_middle_varargs(audit, msg, str1);
77745+ break;
77746+ case GR_PSACCT:
77747+ {
77748+ unsigned int wday, cday;
77749+ __u8 whr, chr;
77750+ __u8 wmin, cmin;
77751+ __u8 wsec, csec;
77752+ char cur_tty[64] = { 0 };
77753+ char parent_tty[64] = { 0 };
77754+
77755+ task = va_arg(ap, struct task_struct *);
77756+ wday = va_arg(ap, unsigned int);
77757+ cday = va_arg(ap, unsigned int);
77758+ whr = va_arg(ap, int);
77759+ chr = va_arg(ap, int);
77760+ wmin = va_arg(ap, int);
77761+ cmin = va_arg(ap, int);
77762+ wsec = va_arg(ap, int);
77763+ csec = va_arg(ap, int);
77764+ ulong1 = va_arg(ap, unsigned long);
77765+ cred = __task_cred(task);
77766+ pcred = __task_cred(task->real_parent);
77767+
77768+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77769+ }
77770+ break;
77771+ default:
77772+ gr_log_middle(audit, msg, ap);
77773+ }
77774+ va_end(ap);
77775+ // these don't need DEFAULTSECARGS printed on the end
77776+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77777+ gr_log_end(audit, 0);
77778+ else
77779+ gr_log_end(audit, 1);
77780+ END_LOCKS(audit);
77781+}
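
The dispatcher above funnels every log type through one tagged-varargs entry point: gr_log_varargs() pulls arguments off the va_list according to the argtypes tag, formats them into a per-severity buffer, and appends the default process context unless the type already carries it. A minimal userspace sketch of the same tag-dispatch pattern follows; the buffer size, tag names, and example format string are illustrative, not taken from the patch.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

enum argtype { LOG_DEFAULT, LOG_STR_INT };	/* stand-ins for the GR_* tags */

static char logbuf[4096];

/* append one formatted fragment, as gr_log_middle_varargs() does */
static void log_middle(const char *fmt, ...)
{
	size_t len = strlen(logbuf);
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(logbuf + len, sizeof(logbuf) - len, fmt, ap);
	va_end(ap);
}

/* walk the va_list according to the tag, as gr_log_varargs() does */
static void log_varargs(enum argtype type, const char *fmt, ...)
{
	va_list ap;

	logbuf[0] = '\0';
	va_start(ap, fmt);
	switch (type) {
	case LOG_STR_INT: {
		const char *s = va_arg(ap, const char *);
		int n = va_arg(ap, int);
		log_middle(fmt, s, n);
		break;
	}
	default:
		vsnprintf(logbuf, sizeof(logbuf), fmt, ap);
	}
	va_end(ap);
	puts(logbuf);
}

int main(void)
{
	log_varargs(LOG_STR_INT, "denied resource %s for pid %d", "/dev/mem", 42);
	return 0;
}

The tag tells the callee how to consume the va_list, which is why the kernel side must keep each GR_* tag and its format string in lockstep.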
77782diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77783new file mode 100644
77784index 0000000..0e39d8c
77785--- /dev/null
77786+++ b/grsecurity/grsec_mem.c
77787@@ -0,0 +1,48 @@
77788+#include <linux/kernel.h>
77789+#include <linux/sched.h>
77790+#include <linux/mm.h>
77791+#include <linux/mman.h>
77792+#include <linux/module.h>
77793+#include <linux/grinternal.h>
77794+
77795+void gr_handle_msr_write(void)
77796+{
77797+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77798+ return;
77799+}
77800+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77801+
77802+void
77803+gr_handle_ioperm(void)
77804+{
77805+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77806+ return;
77807+}
77808+
77809+void
77810+gr_handle_iopl(void)
77811+{
77812+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77813+ return;
77814+}
77815+
77816+void
77817+gr_handle_mem_readwrite(u64 from, u64 to)
77818+{
77819+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77820+ return;
77821+}
77822+
77823+void
77824+gr_handle_vm86(void)
77825+{
77826+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77827+ return;
77828+}
77829+
77830+void
77831+gr_log_badprocpid(const char *entry)
77832+{
77833+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77834+ return;
77835+}
77836diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77837new file mode 100644
77838index 0000000..6f9eb73
77839--- /dev/null
77840+++ b/grsecurity/grsec_mount.c
77841@@ -0,0 +1,65 @@
77842+#include <linux/kernel.h>
77843+#include <linux/sched.h>
77844+#include <linux/mount.h>
77845+#include <linux/major.h>
77846+#include <linux/grsecurity.h>
77847+#include <linux/grinternal.h>
77848+
77849+void
77850+gr_log_remount(const char *devname, const int retval)
77851+{
77852+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77853+ if (grsec_enable_mount && (retval >= 0))
77854+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77855+#endif
77856+ return;
77857+}
77858+
77859+void
77860+gr_log_unmount(const char *devname, const int retval)
77861+{
77862+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77863+ if (grsec_enable_mount && (retval >= 0))
77864+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77865+#endif
77866+ return;
77867+}
77868+
77869+void
77870+gr_log_mount(const char *from, struct path *to, const int retval)
77871+{
77872+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77873+ if (grsec_enable_mount && (retval >= 0))
77874+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77875+#endif
77876+ return;
77877+}
77878+
77879+int
77880+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77881+{
77882+#ifdef CONFIG_GRKERNSEC_ROFS
77883+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77884+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77885+ return -EPERM;
77886+ } else
77887+ return 0;
77888+#endif
77889+ return 0;
77890+}
77891+
77892+int
77893+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77894+{
77895+#ifdef CONFIG_GRKERNSEC_ROFS
77896+ struct inode *inode = dentry->d_inode;
77897+
77898+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77899+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77900+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77901+ return -EPERM;
77902+ } else
77903+ return 0;
77904+#endif
77905+ return 0;
77906+}
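
gr_handle_rofs_mount() and gr_handle_rofs_blockwrite() implement the ROFS feature: once the toggle is set, new writable mounts and writes to block devices are refused. A userspace sketch of the mount-side check; MNT_READONLY uses the value from linux/mount.h, and the toggle variable stands in for grsec_enable_rofs.

#include <stdio.h>
#include <errno.h>

#define MNT_READONLY 0x40	/* value taken from linux/mount.h */

static int rofs_enabled = 1;	/* stands in for grsec_enable_rofs */

/* model of gr_handle_rofs_mount(): with the ROFS toggle on, any
 * mount lacking MNT_READONLY is refused */
static int handle_rofs_mount(int mnt_flags)
{
	if (rofs_enabled && !(mnt_flags & MNT_READONLY))
		return -EPERM;
	return 0;
}

int main(void)
{
	printf("rw mount -> %d\n", handle_rofs_mount(0));		/* -EPERM */
	printf("ro mount -> %d\n", handle_rofs_mount(MNT_READONLY));	/* 0 */
	return 0;
}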
77907diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77908new file mode 100644
77909index 0000000..6ee9d50
77910--- /dev/null
77911+++ b/grsecurity/grsec_pax.c
77912@@ -0,0 +1,45 @@
77913+#include <linux/kernel.h>
77914+#include <linux/sched.h>
77915+#include <linux/mm.h>
77916+#include <linux/file.h>
77917+#include <linux/grinternal.h>
77918+#include <linux/grsecurity.h>
77919+
77920+void
77921+gr_log_textrel(struct vm_area_struct * vma)
77922+{
77923+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77924+ if (grsec_enable_log_rwxmaps)
77925+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
77926+#endif
77927+ return;
77928+}
77929+
77930+void gr_log_ptgnustack(struct file *file)
77931+{
77932+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77933+ if (grsec_enable_log_rwxmaps)
77934+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
77935+#endif
77936+ return;
77937+}
77938+
77939+void
77940+gr_log_rwxmmap(struct file *file)
77941+{
77942+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77943+ if (grsec_enable_log_rwxmaps)
77944+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
77945+#endif
77946+ return;
77947+}
77948+
77949+void
77950+gr_log_rwxmprotect(struct vm_area_struct *vma)
77951+{
77952+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77953+ if (grsec_enable_log_rwxmaps)
77954+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
77955+#endif
77956+ return;
77957+}
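
These hooks hand the offending mapping to the logger; for gr_log_rwxmprotect() the GR_RWXMAPVMA case shown earlier classifies anonymous vmas as stack, heap, or plain anonymous before printing. A small userspace model of that classification; the struct fields, flag values, and brk window are stand-ins for the kernel's.

#include <stdio.h>

struct vma { unsigned long start, end, flags; const char *file; };

#define VM_GROWSDOWN 0x0100	/* kernel flag values, for illustration */
#define VM_GROWSUP   0x0200

/* brk range of a hypothetical process, for the heap test */
static unsigned long start_brk = 0x5000, brk_end = 0x6000;

/* mirror of the GR_RWXMAPVMA classification in gr_log_varargs() */
static const char *classify(const struct vma *v)
{
	if (v->file)
		return v->file;
	if (v->flags & (VM_GROWSDOWN | VM_GROWSUP))
		return "<stack>";
	if (v->start <= brk_end && v->end >= start_brk)
		return "<heap>";
	return "<anonymous mapping>";
}

int main(void)
{
	struct vma heap = { 0x5000, 0x5800, 0, NULL };

	printf("%s\n", classify(&heap));	/* prints "<heap>" */
	return 0;
}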
77958diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
77959new file mode 100644
77960index 0000000..2005a3a
77961--- /dev/null
77962+++ b/grsecurity/grsec_proc.c
77963@@ -0,0 +1,20 @@
77964+#include <linux/kernel.h>
77965+#include <linux/sched.h>
77966+#include <linux/grsecurity.h>
77967+#include <linux/grinternal.h>
77968+
77969+int gr_proc_is_restricted(void)
77970+{
77971+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77972+ const struct cred *cred = current_cred();
77973+#endif
77974+
77975+#ifdef CONFIG_GRKERNSEC_PROC_USER
77976+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
77977+ return -EACCES;
77978+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77979+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
77980+ return -EACCES;
77981+#endif
77982+ return 0;
77983+}
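
gr_proc_is_restricted() gates /proc visibility on the caller's fsuid and, in USERGROUP mode, membership in grsec_proc_gid. A rough userspace analogue follows; userspace has no fsuid, so geteuid() stands in for it, and the gid value is made up.

#include <stdio.h>
#include <unistd.h>

static gid_t proc_gid = 1001;	/* made-up stand-in for grsec_proc_gid */

static int in_group(gid_t gid)
{
	gid_t groups[64];
	int i, n = getgroups(64, groups);

	for (i = 0; i < n; i++)
		if (groups[i] == gid)
			return 1;
	return 0;
}

/* model of gr_proc_is_restricted() in PROC_USERGROUP mode: root always
 * sees /proc, everyone else must be in the trusted group */
static int proc_is_restricted(void)
{
	if (geteuid() == 0)
		return 0;
	return in_group(proc_gid) ? 0 : -1;	/* kernel returns -EACCES */
}

int main(void)
{
	printf("restricted: %d\n", proc_is_restricted());
	return 0;
}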
77984diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
77985new file mode 100644
77986index 0000000..f7f29aa
77987--- /dev/null
77988+++ b/grsecurity/grsec_ptrace.c
77989@@ -0,0 +1,30 @@
77990+#include <linux/kernel.h>
77991+#include <linux/sched.h>
77992+#include <linux/grinternal.h>
77993+#include <linux/security.h>
77994+
77995+void
77996+gr_audit_ptrace(struct task_struct *task)
77997+{
77998+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77999+ if (grsec_enable_audit_ptrace)
78000+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78001+#endif
78002+ return;
78003+}
78004+
78005+int
78006+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78007+{
78008+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78009+ const struct dentry *dentry = file->f_path.dentry;
78010+ const struct vfsmount *mnt = file->f_path.mnt;
78011+
78012+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78013+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78014+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78015+ return -EACCES;
78016+ }
78017+#endif
78018+ return 0;
78019+}
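
gr_ptrace_readexec() refuses a ptrace attach when the tracer could not open the target's binary for reading, closing the hole where ptrace dumps code out of execute-only binaries. A hedged userspace approximation; access(2) checks the real uid rather than performing the kernel's credential-based permission walk.

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

/* rough model of gr_ptrace_readexec(): a tracer that cannot read the
 * binary backing its target is refused the attach */
static int ptrace_readexec_check(const char *target_exe)
{
	if (access(target_exe, R_OK) != 0)
		return -EACCES;
	return 0;
}

int main(void)
{
	printf("/bin/sh -> %d\n", ptrace_readexec_check("/bin/sh"));
	return 0;
}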
78020diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78021new file mode 100644
78022index 0000000..3860c7e
78023--- /dev/null
78024+++ b/grsecurity/grsec_sig.c
78025@@ -0,0 +1,236 @@
78026+#include <linux/kernel.h>
78027+#include <linux/sched.h>
78028+#include <linux/fs.h>
78029+#include <linux/delay.h>
78030+#include <linux/grsecurity.h>
78031+#include <linux/grinternal.h>
78032+#include <linux/hardirq.h>
78033+
78034+char *signames[] = {
78035+ [SIGSEGV] = "Segmentation fault",
78036+ [SIGILL] = "Illegal instruction",
78037+ [SIGABRT] = "Abort",
78038+ [SIGBUS] = "Invalid alignment/Bus error"
78039+};
78040+
78041+void
78042+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78043+{
78044+#ifdef CONFIG_GRKERNSEC_SIGNAL
78045+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78046+ (sig == SIGABRT) || (sig == SIGBUS))) {
78047+ if (task_pid_nr(t) == task_pid_nr(current)) {
78048+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78049+ } else {
78050+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78051+ }
78052+ }
78053+#endif
78054+ return;
78055+}
78056+
78057+int
78058+gr_handle_signal(const struct task_struct *p, const int sig)
78059+{
78060+#ifdef CONFIG_GRKERNSEC
78061+ /* ignore the 0 signal for protected task checks */
78062+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78063+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78064+ return -EPERM;
78065+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78066+ return -EPERM;
78067+ }
78068+#endif
78069+ return 0;
78070+}
78071+
78072+#ifdef CONFIG_GRKERNSEC
78073+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78074+
78075+int gr_fake_force_sig(int sig, struct task_struct *t)
78076+{
78077+ unsigned long int flags;
78078+ int ret, blocked, ignored;
78079+ struct k_sigaction *action;
78080+
78081+ spin_lock_irqsave(&t->sighand->siglock, flags);
78082+ action = &t->sighand->action[sig-1];
78083+ ignored = action->sa.sa_handler == SIG_IGN;
78084+ blocked = sigismember(&t->blocked, sig);
78085+ if (blocked || ignored) {
78086+ action->sa.sa_handler = SIG_DFL;
78087+ if (blocked) {
78088+ sigdelset(&t->blocked, sig);
78089+ recalc_sigpending_and_wake(t);
78090+ }
78091+ }
78092+ if (action->sa.sa_handler == SIG_DFL)
78093+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78094+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78095+
78096+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78097+
78098+ return ret;
78099+}
78100+#endif
78101+
78102+#define GR_USER_BAN_TIME (15 * 60)
78103+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78104+
78105+void gr_handle_brute_attach(int dumpable)
78106+{
78107+#ifdef CONFIG_GRKERNSEC_BRUTE
78108+ struct task_struct *p = current;
78109+ kuid_t uid = GLOBAL_ROOT_UID;
78110+ int daemon = 0;
78111+
78112+ if (!grsec_enable_brute)
78113+ return;
78114+
78115+ rcu_read_lock();
78116+ read_lock(&tasklist_lock);
78117+ read_lock(&grsec_exec_file_lock);
78118+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78119+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78120+ p->real_parent->brute = 1;
78121+ daemon = 1;
78122+ } else {
78123+ const struct cred *cred = __task_cred(p), *cred2;
78124+ struct task_struct *tsk, *tsk2;
78125+
78126+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78127+ struct user_struct *user;
78128+
78129+ uid = cred->uid;
78130+
78131+			/* this find_user() reference is put (dropped) upon the next execution past the ban's expiration */
78132+ user = find_user(uid);
78133+ if (user == NULL)
78134+ goto unlock;
78135+ user->suid_banned = 1;
78136+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78137+ if (user->suid_ban_expires == ~0UL)
78138+ user->suid_ban_expires--;
78139+
78140+ /* only kill other threads of the same binary, from the same user */
78141+ do_each_thread(tsk2, tsk) {
78142+ cred2 = __task_cred(tsk);
78143+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78144+ gr_fake_force_sig(SIGKILL, tsk);
78145+ } while_each_thread(tsk2, tsk);
78146+ }
78147+ }
78148+unlock:
78149+ read_unlock(&grsec_exec_file_lock);
78150+ read_unlock(&tasklist_lock);
78151+ rcu_read_unlock();
78152+
78153+ if (gr_is_global_nonroot(uid))
78154+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78155+ else if (daemon)
78156+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78157+
78158+#endif
78159+ return;
78160+}
78161+
78162+void gr_handle_brute_check(void)
78163+{
78164+#ifdef CONFIG_GRKERNSEC_BRUTE
78165+ struct task_struct *p = current;
78166+
78167+ if (unlikely(p->brute)) {
78168+ if (!grsec_enable_brute)
78169+ p->brute = 0;
78170+ else if (time_before(get_seconds(), p->brute_expires))
78171+ msleep(30 * 1000);
78172+ }
78173+#endif
78174+ return;
78175+}
78176+
78177+void gr_handle_kernel_exploit(void)
78178+{
78179+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78180+ const struct cred *cred;
78181+ struct task_struct *tsk, *tsk2;
78182+ struct user_struct *user;
78183+ kuid_t uid;
78184+
78185+ if (in_irq() || in_serving_softirq() || in_nmi())
78186+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78187+
78188+ uid = current_uid();
78189+
78190+ if (gr_is_global_root(uid))
78191+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78192+ else {
78193+ /* kill all the processes of this user, hold a reference
78194+ to their creds struct, and prevent them from creating
78195+ another process until system reset
78196+ */
78197+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78198+ GR_GLOBAL_UID(uid));
78199+ /* we intentionally leak this ref */
78200+ user = get_uid(current->cred->user);
78201+ if (user)
78202+ user->kernel_banned = 1;
78203+
78204+ /* kill all processes of this user */
78205+ read_lock(&tasklist_lock);
78206+ do_each_thread(tsk2, tsk) {
78207+ cred = __task_cred(tsk);
78208+ if (uid_eq(cred->uid, uid))
78209+ gr_fake_force_sig(SIGKILL, tsk);
78210+ } while_each_thread(tsk2, tsk);
78211+ read_unlock(&tasklist_lock);
78212+ }
78213+#endif
78214+}
78215+
78216+#ifdef CONFIG_GRKERNSEC_BRUTE
78217+static bool suid_ban_expired(struct user_struct *user)
78218+{
78219+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78220+ user->suid_banned = 0;
78221+ user->suid_ban_expires = 0;
78222+ free_uid(user);
78223+ return true;
78224+ }
78225+
78226+ return false;
78227+}
78228+#endif
78229+
78230+int gr_process_kernel_exec_ban(void)
78231+{
78232+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78233+ if (unlikely(current->cred->user->kernel_banned))
78234+ return -EPERM;
78235+#endif
78236+ return 0;
78237+}
78238+
78239+int gr_process_kernel_setuid_ban(struct user_struct *user)
78240+{
78241+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78242+ if (unlikely(user->kernel_banned))
78243+ gr_fake_force_sig(SIGKILL, current);
78244+#endif
78245+ return 0;
78246+}
78247+
78248+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78249+{
78250+#ifdef CONFIG_GRKERNSEC_BRUTE
78251+ struct user_struct *user = current->cred->user;
78252+ if (unlikely(user->suid_banned)) {
78253+ if (suid_ban_expired(user))
78254+ return 0;
78255+ /* disallow execution of suid binaries only */
78256+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78257+ return -EPERM;
78258+ }
78259+#endif
78260+ return 0;
78261+}
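
The brute-force deterrence above bans a crashed suid binary's user for GR_USER_BAN_TIME, and suid_ban_expired() lifts the ban lazily on the next exec. A standalone model of the expiry rule; the kernel also drops the user_struct reference via free_uid(), which is omitted here.

#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)

struct user { int suid_banned; unsigned long suid_ban_expires; };

/* mirror of suid_ban_expired(): ~0UL marks a permanent ban, anything
 * else lapses once its expiry time passes */
static int suid_ban_expired(struct user *u)
{
	unsigned long now = (unsigned long)time(NULL);

	if (u->suid_ban_expires != ~0UL && now >= u->suid_ban_expires) {
		u->suid_banned = 0;
		u->suid_ban_expires = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct user u = { 1, (unsigned long)time(NULL) + GR_USER_BAN_TIME };

	printf("fresh ban expired? %d\n", suid_ban_expired(&u));	/* 0 */
	u.suid_ban_expires = (unsigned long)time(NULL) - 1;
	printf("old ban expired?   %d\n", suid_ban_expired(&u));	/* 1 */
	return 0;
}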
78262diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78263new file mode 100644
78264index 0000000..e3650b6
78265--- /dev/null
78266+++ b/grsecurity/grsec_sock.c
78267@@ -0,0 +1,244 @@
78268+#include <linux/kernel.h>
78269+#include <linux/module.h>
78270+#include <linux/sched.h>
78271+#include <linux/file.h>
78272+#include <linux/net.h>
78273+#include <linux/in.h>
78274+#include <linux/ip.h>
78275+#include <net/sock.h>
78276+#include <net/inet_sock.h>
78277+#include <linux/grsecurity.h>
78278+#include <linux/grinternal.h>
78279+#include <linux/gracl.h>
78280+
78281+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78282+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78283+
78284+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78285+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78286+
78287+#ifdef CONFIG_UNIX_MODULE
78288+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78289+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78290+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78291+EXPORT_SYMBOL_GPL(gr_handle_create);
78292+#endif
78293+
78294+#ifdef CONFIG_GRKERNSEC
78295+#define gr_conn_table_size 32749
78296+struct conn_table_entry {
78297+ struct conn_table_entry *next;
78298+ struct signal_struct *sig;
78299+};
78300+
78301+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78302+DEFINE_SPINLOCK(gr_conn_table_lock);
78303+
78304+extern const char * gr_socktype_to_name(unsigned char type);
78305+extern const char * gr_proto_to_name(unsigned char proto);
78306+extern const char * gr_sockfamily_to_name(unsigned char family);
78307+
78308+static __inline__ int
78309+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78310+{
78311+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78312+}
78313+
78314+static __inline__ int
78315+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78316+ __u16 sport, __u16 dport)
78317+{
78318+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78319+ sig->gr_sport == sport && sig->gr_dport == dport))
78320+ return 1;
78321+ else
78322+ return 0;
78323+}
78324+
78325+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78326+{
78327+ struct conn_table_entry **match;
78328+ unsigned int index;
78329+
78330+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78331+ sig->gr_sport, sig->gr_dport,
78332+ gr_conn_table_size);
78333+
78334+ newent->sig = sig;
78335+
78336+ match = &gr_conn_table[index];
78337+ newent->next = *match;
78338+ *match = newent;
78339+
78340+ return;
78341+}
78342+
78343+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78344+{
78345+ struct conn_table_entry *match, *last = NULL;
78346+ unsigned int index;
78347+
78348+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78349+ sig->gr_sport, sig->gr_dport,
78350+ gr_conn_table_size);
78351+
78352+ match = gr_conn_table[index];
78353+ while (match && !conn_match(match->sig,
78354+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78355+ sig->gr_dport)) {
78356+ last = match;
78357+ match = match->next;
78358+ }
78359+
78360+ if (match) {
78361+ if (last)
78362+ last->next = match->next;
78363+ else
78364+ gr_conn_table[index] = NULL;
78365+ kfree(match);
78366+ }
78367+
78368+ return;
78369+}
78370+
78371+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78372+ __u16 sport, __u16 dport)
78373+{
78374+ struct conn_table_entry *match;
78375+ unsigned int index;
78376+
78377+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78378+
78379+ match = gr_conn_table[index];
78380+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78381+ match = match->next;
78382+
78383+ if (match)
78384+ return match->sig;
78385+ else
78386+ return NULL;
78387+}
78388+
78389+#endif
78390+
78391+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78392+{
78393+#ifdef CONFIG_GRKERNSEC
78394+ struct signal_struct *sig = current->signal;
78395+ struct conn_table_entry *newent;
78396+
78397+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78398+ if (newent == NULL)
78399+ return;
78400+ /* no bh lock needed since we are called with bh disabled */
78401+ spin_lock(&gr_conn_table_lock);
78402+ gr_del_task_from_ip_table_nolock(sig);
78403+ sig->gr_saddr = inet->inet_rcv_saddr;
78404+ sig->gr_daddr = inet->inet_daddr;
78405+ sig->gr_sport = inet->inet_sport;
78406+ sig->gr_dport = inet->inet_dport;
78407+ gr_add_to_task_ip_table_nolock(sig, newent);
78408+ spin_unlock(&gr_conn_table_lock);
78409+#endif
78410+ return;
78411+}
78412+
78413+void gr_del_task_from_ip_table(struct task_struct *task)
78414+{
78415+#ifdef CONFIG_GRKERNSEC
78416+ spin_lock_bh(&gr_conn_table_lock);
78417+ gr_del_task_from_ip_table_nolock(task->signal);
78418+ spin_unlock_bh(&gr_conn_table_lock);
78419+#endif
78420+ return;
78421+}
78422+
78423+void
78424+gr_attach_curr_ip(const struct sock *sk)
78425+{
78426+#ifdef CONFIG_GRKERNSEC
78427+ struct signal_struct *p, *set;
78428+ const struct inet_sock *inet = inet_sk(sk);
78429+
78430+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78431+ return;
78432+
78433+ set = current->signal;
78434+
78435+ spin_lock_bh(&gr_conn_table_lock);
78436+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78437+ inet->inet_dport, inet->inet_sport);
78438+ if (unlikely(p != NULL)) {
78439+ set->curr_ip = p->curr_ip;
78440+ set->used_accept = 1;
78441+ gr_del_task_from_ip_table_nolock(p);
78442+ spin_unlock_bh(&gr_conn_table_lock);
78443+ return;
78444+ }
78445+ spin_unlock_bh(&gr_conn_table_lock);
78446+
78447+ set->curr_ip = inet->inet_daddr;
78448+ set->used_accept = 1;
78449+#endif
78450+ return;
78451+}
78452+
78453+int
78454+gr_handle_sock_all(const int family, const int type, const int protocol)
78455+{
78456+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78457+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78458+ (family != AF_UNIX)) {
78459+ if (family == AF_INET)
78460+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78461+ else
78462+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78463+ return -EACCES;
78464+ }
78465+#endif
78466+ return 0;
78467+}
78468+
78469+int
78470+gr_handle_sock_server(const struct sockaddr *sck)
78471+{
78472+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78473+ if (grsec_enable_socket_server &&
78474+ in_group_p(grsec_socket_server_gid) &&
78475+ sck && (sck->sa_family != AF_UNIX) &&
78476+ (sck->sa_family != AF_LOCAL)) {
78477+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78478+ return -EACCES;
78479+ }
78480+#endif
78481+ return 0;
78482+}
78483+
78484+int
78485+gr_handle_sock_server_other(const struct sock *sck)
78486+{
78487+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78488+ if (grsec_enable_socket_server &&
78489+ in_group_p(grsec_socket_server_gid) &&
78490+ sck && (sck->sk_family != AF_UNIX) &&
78491+ (sck->sk_family != AF_LOCAL)) {
78492+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78493+ return -EACCES;
78494+ }
78495+#endif
78496+ return 0;
78497+}
78498+
78499+int
78500+gr_handle_sock_client(const struct sockaddr *sck)
78501+{
78502+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78503+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78504+ sck && (sck->sa_family != AF_UNIX) &&
78505+ (sck->sa_family != AF_LOCAL)) {
78506+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78507+ return -EACCES;
78508+ }
78509+#endif
78510+ return 0;
78511+}
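
The connection table maps a TCP 4-tuple back to the signal_struct that owns it, so gr_attach_curr_ip() can attribute later log entries to the peer's IP. Buckets are picked by conn_hash(); the sketch below reproduces that mixing in userspace, with addresses written in host byte order for readability (the kernel hashes the raw inet_sock fields).

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 32749u	/* prime, same as gr_conn_table_size */

/* same mixing as conn_hash(): fold the 4-tuple into a bucket index */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
	/* 10.0.0.1:12345 -> 10.0.0.2:80 */
	printf("bucket %u\n", conn_hash(0x0a000001, 0x0a000002, 12345, 80));
	return 0;
}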
78512diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78513new file mode 100644
78514index 0000000..cce889e
78515--- /dev/null
78516+++ b/grsecurity/grsec_sysctl.c
78517@@ -0,0 +1,488 @@
78518+#include <linux/kernel.h>
78519+#include <linux/sched.h>
78520+#include <linux/sysctl.h>
78521+#include <linux/grsecurity.h>
78522+#include <linux/grinternal.h>
78523+
78524+int
78525+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78526+{
78527+#ifdef CONFIG_GRKERNSEC_SYSCTL
78528+ if (dirname == NULL || name == NULL)
78529+ return 0;
78530+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78531+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78532+ return -EACCES;
78533+ }
78534+#endif
78535+ return 0;
78536+}
78537+
78538+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78539+static int __maybe_unused __read_only one = 1;
78540+#endif
78541+
78542+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78543+ defined(CONFIG_GRKERNSEC_DENYUSB)
78544+struct ctl_table grsecurity_table[] = {
78545+#ifdef CONFIG_GRKERNSEC_SYSCTL
78546+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78547+#ifdef CONFIG_GRKERNSEC_IO
78548+ {
78549+ .procname = "disable_priv_io",
78550+ .data = &grsec_disable_privio,
78551+ .maxlen = sizeof(int),
78552+ .mode = 0600,
78553+ .proc_handler = &proc_dointvec,
78554+ },
78555+#endif
78556+#endif
78557+#ifdef CONFIG_GRKERNSEC_LINK
78558+ {
78559+ .procname = "linking_restrictions",
78560+ .data = &grsec_enable_link,
78561+ .maxlen = sizeof(int),
78562+ .mode = 0600,
78563+ .proc_handler = &proc_dointvec,
78564+ },
78565+#endif
78566+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78567+ {
78568+ .procname = "enforce_symlinksifowner",
78569+ .data = &grsec_enable_symlinkown,
78570+ .maxlen = sizeof(int),
78571+ .mode = 0600,
78572+ .proc_handler = &proc_dointvec,
78573+ },
78574+ {
78575+ .procname = "symlinkown_gid",
78576+ .data = &grsec_symlinkown_gid,
78577+ .maxlen = sizeof(int),
78578+ .mode = 0600,
78579+ .proc_handler = &proc_dointvec,
78580+ },
78581+#endif
78582+#ifdef CONFIG_GRKERNSEC_BRUTE
78583+ {
78584+ .procname = "deter_bruteforce",
78585+ .data = &grsec_enable_brute,
78586+ .maxlen = sizeof(int),
78587+ .mode = 0600,
78588+ .proc_handler = &proc_dointvec,
78589+ },
78590+#endif
78591+#ifdef CONFIG_GRKERNSEC_FIFO
78592+ {
78593+ .procname = "fifo_restrictions",
78594+ .data = &grsec_enable_fifo,
78595+ .maxlen = sizeof(int),
78596+ .mode = 0600,
78597+ .proc_handler = &proc_dointvec,
78598+ },
78599+#endif
78600+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78601+ {
78602+ .procname = "ptrace_readexec",
78603+ .data = &grsec_enable_ptrace_readexec,
78604+ .maxlen = sizeof(int),
78605+ .mode = 0600,
78606+ .proc_handler = &proc_dointvec,
78607+ },
78608+#endif
78609+#ifdef CONFIG_GRKERNSEC_SETXID
78610+ {
78611+ .procname = "consistent_setxid",
78612+ .data = &grsec_enable_setxid,
78613+ .maxlen = sizeof(int),
78614+ .mode = 0600,
78615+ .proc_handler = &proc_dointvec,
78616+ },
78617+#endif
78618+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78619+ {
78620+ .procname = "ip_blackhole",
78621+ .data = &grsec_enable_blackhole,
78622+ .maxlen = sizeof(int),
78623+ .mode = 0600,
78624+ .proc_handler = &proc_dointvec,
78625+ },
78626+ {
78627+ .procname = "lastack_retries",
78628+ .data = &grsec_lastack_retries,
78629+ .maxlen = sizeof(int),
78630+ .mode = 0600,
78631+ .proc_handler = &proc_dointvec,
78632+ },
78633+#endif
78634+#ifdef CONFIG_GRKERNSEC_EXECLOG
78635+ {
78636+ .procname = "exec_logging",
78637+ .data = &grsec_enable_execlog,
78638+ .maxlen = sizeof(int),
78639+ .mode = 0600,
78640+ .proc_handler = &proc_dointvec,
78641+ },
78642+#endif
78643+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78644+ {
78645+ .procname = "rwxmap_logging",
78646+ .data = &grsec_enable_log_rwxmaps,
78647+ .maxlen = sizeof(int),
78648+ .mode = 0600,
78649+ .proc_handler = &proc_dointvec,
78650+ },
78651+#endif
78652+#ifdef CONFIG_GRKERNSEC_SIGNAL
78653+ {
78654+ .procname = "signal_logging",
78655+ .data = &grsec_enable_signal,
78656+ .maxlen = sizeof(int),
78657+ .mode = 0600,
78658+ .proc_handler = &proc_dointvec,
78659+ },
78660+#endif
78661+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78662+ {
78663+ .procname = "forkfail_logging",
78664+ .data = &grsec_enable_forkfail,
78665+ .maxlen = sizeof(int),
78666+ .mode = 0600,
78667+ .proc_handler = &proc_dointvec,
78668+ },
78669+#endif
78670+#ifdef CONFIG_GRKERNSEC_TIME
78671+ {
78672+ .procname = "timechange_logging",
78673+ .data = &grsec_enable_time,
78674+ .maxlen = sizeof(int),
78675+ .mode = 0600,
78676+ .proc_handler = &proc_dointvec,
78677+ },
78678+#endif
78679+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78680+ {
78681+ .procname = "chroot_deny_shmat",
78682+ .data = &grsec_enable_chroot_shmat,
78683+ .maxlen = sizeof(int),
78684+ .mode = 0600,
78685+ .proc_handler = &proc_dointvec,
78686+ },
78687+#endif
78688+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78689+ {
78690+ .procname = "chroot_deny_unix",
78691+ .data = &grsec_enable_chroot_unix,
78692+ .maxlen = sizeof(int),
78693+ .mode = 0600,
78694+ .proc_handler = &proc_dointvec,
78695+ },
78696+#endif
78697+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78698+ {
78699+ .procname = "chroot_deny_mount",
78700+ .data = &grsec_enable_chroot_mount,
78701+ .maxlen = sizeof(int),
78702+ .mode = 0600,
78703+ .proc_handler = &proc_dointvec,
78704+ },
78705+#endif
78706+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78707+ {
78708+ .procname = "chroot_deny_fchdir",
78709+ .data = &grsec_enable_chroot_fchdir,
78710+ .maxlen = sizeof(int),
78711+ .mode = 0600,
78712+ .proc_handler = &proc_dointvec,
78713+ },
78714+#endif
78715+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78716+ {
78717+ .procname = "chroot_deny_chroot",
78718+ .data = &grsec_enable_chroot_double,
78719+ .maxlen = sizeof(int),
78720+ .mode = 0600,
78721+ .proc_handler = &proc_dointvec,
78722+ },
78723+#endif
78724+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78725+ {
78726+ .procname = "chroot_deny_pivot",
78727+ .data = &grsec_enable_chroot_pivot,
78728+ .maxlen = sizeof(int),
78729+ .mode = 0600,
78730+ .proc_handler = &proc_dointvec,
78731+ },
78732+#endif
78733+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78734+ {
78735+ .procname = "chroot_enforce_chdir",
78736+ .data = &grsec_enable_chroot_chdir,
78737+ .maxlen = sizeof(int),
78738+ .mode = 0600,
78739+ .proc_handler = &proc_dointvec,
78740+ },
78741+#endif
78742+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78743+ {
78744+ .procname = "chroot_deny_chmod",
78745+ .data = &grsec_enable_chroot_chmod,
78746+ .maxlen = sizeof(int),
78747+ .mode = 0600,
78748+ .proc_handler = &proc_dointvec,
78749+ },
78750+#endif
78751+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78752+ {
78753+ .procname = "chroot_deny_mknod",
78754+ .data = &grsec_enable_chroot_mknod,
78755+ .maxlen = sizeof(int),
78756+ .mode = 0600,
78757+ .proc_handler = &proc_dointvec,
78758+ },
78759+#endif
78760+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78761+ {
78762+ .procname = "chroot_restrict_nice",
78763+ .data = &grsec_enable_chroot_nice,
78764+ .maxlen = sizeof(int),
78765+ .mode = 0600,
78766+ .proc_handler = &proc_dointvec,
78767+ },
78768+#endif
78769+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78770+ {
78771+ .procname = "chroot_execlog",
78772+ .data = &grsec_enable_chroot_execlog,
78773+ .maxlen = sizeof(int),
78774+ .mode = 0600,
78775+ .proc_handler = &proc_dointvec,
78776+ },
78777+#endif
78778+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78779+ {
78780+ .procname = "chroot_caps",
78781+ .data = &grsec_enable_chroot_caps,
78782+ .maxlen = sizeof(int),
78783+ .mode = 0600,
78784+ .proc_handler = &proc_dointvec,
78785+ },
78786+#endif
78787+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78788+ {
78789+ .procname = "chroot_deny_bad_rename",
78790+ .data = &grsec_enable_chroot_rename,
78791+ .maxlen = sizeof(int),
78792+ .mode = 0600,
78793+ .proc_handler = &proc_dointvec,
78794+ },
78795+#endif
78796+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78797+ {
78798+ .procname = "chroot_deny_sysctl",
78799+ .data = &grsec_enable_chroot_sysctl,
78800+ .maxlen = sizeof(int),
78801+ .mode = 0600,
78802+ .proc_handler = &proc_dointvec,
78803+ },
78804+#endif
78805+#ifdef CONFIG_GRKERNSEC_TPE
78806+ {
78807+ .procname = "tpe",
78808+ .data = &grsec_enable_tpe,
78809+ .maxlen = sizeof(int),
78810+ .mode = 0600,
78811+ .proc_handler = &proc_dointvec,
78812+ },
78813+ {
78814+ .procname = "tpe_gid",
78815+ .data = &grsec_tpe_gid,
78816+ .maxlen = sizeof(int),
78817+ .mode = 0600,
78818+ .proc_handler = &proc_dointvec,
78819+ },
78820+#endif
78821+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78822+ {
78823+ .procname = "tpe_invert",
78824+ .data = &grsec_enable_tpe_invert,
78825+ .maxlen = sizeof(int),
78826+ .mode = 0600,
78827+ .proc_handler = &proc_dointvec,
78828+ },
78829+#endif
78830+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78831+ {
78832+ .procname = "tpe_restrict_all",
78833+ .data = &grsec_enable_tpe_all,
78834+ .maxlen = sizeof(int),
78835+ .mode = 0600,
78836+ .proc_handler = &proc_dointvec,
78837+ },
78838+#endif
78839+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78840+ {
78841+ .procname = "socket_all",
78842+ .data = &grsec_enable_socket_all,
78843+ .maxlen = sizeof(int),
78844+ .mode = 0600,
78845+ .proc_handler = &proc_dointvec,
78846+ },
78847+ {
78848+ .procname = "socket_all_gid",
78849+ .data = &grsec_socket_all_gid,
78850+ .maxlen = sizeof(int),
78851+ .mode = 0600,
78852+ .proc_handler = &proc_dointvec,
78853+ },
78854+#endif
78855+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78856+ {
78857+ .procname = "socket_client",
78858+ .data = &grsec_enable_socket_client,
78859+ .maxlen = sizeof(int),
78860+ .mode = 0600,
78861+ .proc_handler = &proc_dointvec,
78862+ },
78863+ {
78864+ .procname = "socket_client_gid",
78865+ .data = &grsec_socket_client_gid,
78866+ .maxlen = sizeof(int),
78867+ .mode = 0600,
78868+ .proc_handler = &proc_dointvec,
78869+ },
78870+#endif
78871+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78872+ {
78873+ .procname = "socket_server",
78874+ .data = &grsec_enable_socket_server,
78875+ .maxlen = sizeof(int),
78876+ .mode = 0600,
78877+ .proc_handler = &proc_dointvec,
78878+ },
78879+ {
78880+ .procname = "socket_server_gid",
78881+ .data = &grsec_socket_server_gid,
78882+ .maxlen = sizeof(int),
78883+ .mode = 0600,
78884+ .proc_handler = &proc_dointvec,
78885+ },
78886+#endif
78887+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78888+ {
78889+ .procname = "audit_group",
78890+ .data = &grsec_enable_group,
78891+ .maxlen = sizeof(int),
78892+ .mode = 0600,
78893+ .proc_handler = &proc_dointvec,
78894+ },
78895+ {
78896+ .procname = "audit_gid",
78897+ .data = &grsec_audit_gid,
78898+ .maxlen = sizeof(int),
78899+ .mode = 0600,
78900+ .proc_handler = &proc_dointvec,
78901+ },
78902+#endif
78903+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78904+ {
78905+ .procname = "audit_chdir",
78906+ .data = &grsec_enable_chdir,
78907+ .maxlen = sizeof(int),
78908+ .mode = 0600,
78909+ .proc_handler = &proc_dointvec,
78910+ },
78911+#endif
78912+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78913+ {
78914+ .procname = "audit_mount",
78915+ .data = &grsec_enable_mount,
78916+ .maxlen = sizeof(int),
78917+ .mode = 0600,
78918+ .proc_handler = &proc_dointvec,
78919+ },
78920+#endif
78921+#ifdef CONFIG_GRKERNSEC_DMESG
78922+ {
78923+ .procname = "dmesg",
78924+ .data = &grsec_enable_dmesg,
78925+ .maxlen = sizeof(int),
78926+ .mode = 0600,
78927+ .proc_handler = &proc_dointvec,
78928+ },
78929+#endif
78930+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78931+ {
78932+ .procname = "chroot_findtask",
78933+ .data = &grsec_enable_chroot_findtask,
78934+ .maxlen = sizeof(int),
78935+ .mode = 0600,
78936+ .proc_handler = &proc_dointvec,
78937+ },
78938+#endif
78939+#ifdef CONFIG_GRKERNSEC_RESLOG
78940+ {
78941+ .procname = "resource_logging",
78942+ .data = &grsec_resource_logging,
78943+ .maxlen = sizeof(int),
78944+ .mode = 0600,
78945+ .proc_handler = &proc_dointvec,
78946+ },
78947+#endif
78948+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78949+ {
78950+ .procname = "audit_ptrace",
78951+ .data = &grsec_enable_audit_ptrace,
78952+ .maxlen = sizeof(int),
78953+ .mode = 0600,
78954+ .proc_handler = &proc_dointvec,
78955+ },
78956+#endif
78957+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78958+ {
78959+ .procname = "harden_ptrace",
78960+ .data = &grsec_enable_harden_ptrace,
78961+ .maxlen = sizeof(int),
78962+ .mode = 0600,
78963+ .proc_handler = &proc_dointvec,
78964+ },
78965+#endif
78966+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78967+ {
78968+ .procname = "harden_ipc",
78969+ .data = &grsec_enable_harden_ipc,
78970+ .maxlen = sizeof(int),
78971+ .mode = 0600,
78972+ .proc_handler = &proc_dointvec,
78973+ },
78974+#endif
78975+ {
78976+ .procname = "grsec_lock",
78977+ .data = &grsec_lock,
78978+ .maxlen = sizeof(int),
78979+ .mode = 0600,
78980+ .proc_handler = &proc_dointvec,
78981+ },
78982+#endif
78983+#ifdef CONFIG_GRKERNSEC_ROFS
78984+ {
78985+ .procname = "romount_protect",
78986+ .data = &grsec_enable_rofs,
78987+ .maxlen = sizeof(int),
78988+ .mode = 0600,
78989+ .proc_handler = &proc_dointvec_minmax,
78990+ .extra1 = &one,
78991+ .extra2 = &one,
78992+ },
78993+#endif
78994+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
78995+ {
78996+ .procname = "deny_new_usb",
78997+ .data = &grsec_deny_new_usb,
78998+ .maxlen = sizeof(int),
78999+ .mode = 0600,
79000+ .proc_handler = &proc_dointvec,
79001+ },
79002+#endif
79003+ { }
79004+};
79005+#endif
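
Every tunable above is a plain int exposed through proc_dointvec in the grsecurity sysctl directory, and gr_handle_sysctl_mod() makes the whole directory write-once via grsec_lock. A cut-down userspace model of the table plus the lock semantics; the struct layout and lookup are simplified, while the names mirror the patch.

#include <stdio.h>
#include <string.h>

/* cut-down model of struct ctl_table: name, backing int, mode */
struct tunable {
	const char *procname;
	int *data;
	int mode;
};

static int grsec_lock, enable_tpe;

static struct tunable grsec_table[] = {
	{ "grsec_lock", &grsec_lock, 0600 },
	{ "tpe",        &enable_tpe, 0600 },
	{ NULL, NULL, 0 }	/* sentinel entry, as in the kernel table */
};

/* model of gr_handle_sysctl_mod(): once grsec_lock is set, every
 * write into the grsecurity directory is refused */
static int sysctl_write(const char *name, int val)
{
	struct tunable *t;

	if (grsec_lock)
		return -1;	/* kernel returns -EACCES */
	for (t = grsec_table; t->procname; t++)
		if (strcmp(t->procname, name) == 0) {
			*t->data = val;
			return 0;
		}
	return -1;
}

int main(void)
{
	sysctl_write("tpe", 1);
	sysctl_write("grsec_lock", 1);
	printf("write after lock: %d\n", sysctl_write("tpe", 0));	/* -1 */
	return 0;
}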
79006diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79007new file mode 100644
79008index 0000000..61b514e
79009--- /dev/null
79010+++ b/grsecurity/grsec_time.c
79011@@ -0,0 +1,16 @@
79012+#include <linux/kernel.h>
79013+#include <linux/sched.h>
79014+#include <linux/grinternal.h>
79015+#include <linux/module.h>
79016+
79017+void
79018+gr_log_timechange(void)
79019+{
79020+#ifdef CONFIG_GRKERNSEC_TIME
79021+ if (grsec_enable_time)
79022+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79023+#endif
79024+ return;
79025+}
79026+
79027+EXPORT_SYMBOL_GPL(gr_log_timechange);
79028diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79029new file mode 100644
79030index 0000000..d1953de
79031--- /dev/null
79032+++ b/grsecurity/grsec_tpe.c
79033@@ -0,0 +1,78 @@
79034+#include <linux/kernel.h>
79035+#include <linux/sched.h>
79036+#include <linux/file.h>
79037+#include <linux/fs.h>
79038+#include <linux/grinternal.h>
79039+
79040+extern int gr_acl_tpe_check(void);
79041+
79042+int
79043+gr_tpe_allow(const struct file *file)
79044+{
79045+#ifdef CONFIG_GRKERNSEC
79046+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79047+ struct inode *file_inode = file->f_path.dentry->d_inode;
79048+ const struct cred *cred = current_cred();
79049+ char *msg = NULL;
79050+ char *msg2 = NULL;
79051+
79052+ // never restrict root
79053+ if (gr_is_global_root(cred->uid))
79054+ return 1;
79055+
79056+ if (grsec_enable_tpe) {
79057+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79058+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79059+ msg = "not being in trusted group";
79060+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79061+ msg = "being in untrusted group";
79062+#else
79063+ if (in_group_p(grsec_tpe_gid))
79064+ msg = "being in untrusted group";
79065+#endif
79066+ }
79067+ if (!msg && gr_acl_tpe_check())
79068+ msg = "being in untrusted role";
79069+
79070+ // not in any affected group/role
79071+ if (!msg)
79072+ goto next_check;
79073+
79074+ if (gr_is_global_nonroot(inode->i_uid))
79075+ msg2 = "file in non-root-owned directory";
79076+ else if (inode->i_mode & S_IWOTH)
79077+ msg2 = "file in world-writable directory";
79078+ else if (inode->i_mode & S_IWGRP)
79079+ msg2 = "file in group-writable directory";
79080+ else if (file_inode->i_mode & S_IWOTH)
79081+ msg2 = "file is world-writable";
79082+
79083+ if (msg && msg2) {
79084+ char fullmsg[70] = {0};
79085+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79086+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79087+ return 0;
79088+ }
79089+ msg = NULL;
79090+next_check:
79091+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79092+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79093+ return 1;
79094+
79095+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79096+ msg = "directory not owned by user";
79097+ else if (inode->i_mode & S_IWOTH)
79098+ msg = "file in world-writable directory";
79099+ else if (inode->i_mode & S_IWGRP)
79100+ msg = "file in group-writable directory";
79101+ else if (file_inode->i_mode & S_IWOTH)
79102+ msg = "file is world-writable";
79103+
79104+ if (msg) {
79105+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79106+ return 0;
79107+ }
79108+#endif
79109+#endif
79110+ return 1;
79111+}
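
gr_tpe_allow() only lets untrusted users execute files whose containing directory is root-owned and neither group- nor world-writable, and which are not themselves world-writable. The per-file checks reduce to a few stat bits; a userspace sketch follows, with arbitrary example paths in main().

#include <stdio.h>
#include <sys/stat.h>

/* the per-file tests of gr_tpe_allow(), reduced to stat bits */
static const char *tpe_reject_reason(const struct stat *dir,
				     const struct stat *file)
{
	if (dir->st_uid != 0)
		return "file in non-root-owned directory";
	if (dir->st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir->st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (file->st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;	/* execution allowed */
}

int main(void)
{
	struct stat dir, file;

	if (stat("/usr/bin", &dir) == 0 && stat("/usr/bin/env", &file) == 0) {
		const char *why = tpe_reject_reason(&dir, &file);
		printf("%s\n", why ? why : "allowed");
	}
	return 0;
}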
79112diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79113new file mode 100644
79114index 0000000..ae02d8e
79115--- /dev/null
79116+++ b/grsecurity/grsec_usb.c
79117@@ -0,0 +1,15 @@
79118+#include <linux/kernel.h>
79119+#include <linux/grinternal.h>
79120+#include <linux/module.h>
79121+
79122+int gr_handle_new_usb(void)
79123+{
79124+#ifdef CONFIG_GRKERNSEC_DENYUSB
79125+ if (grsec_deny_new_usb) {
79126+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79127+ return 1;
79128+ }
79129+#endif
79130+ return 0;
79131+}
79132+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79133diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79134new file mode 100644
79135index 0000000..158b330
79136--- /dev/null
79137+++ b/grsecurity/grsum.c
79138@@ -0,0 +1,64 @@
79139+#include <linux/err.h>
79140+#include <linux/kernel.h>
79141+#include <linux/sched.h>
79142+#include <linux/mm.h>
79143+#include <linux/scatterlist.h>
79144+#include <linux/crypto.h>
79145+#include <linux/gracl.h>
79146+
79147+
79148+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79149+#error "crypto and sha256 must be built into the kernel"
79150+#endif
79151+
79152+int
79153+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79154+{
79155+ struct crypto_hash *tfm;
79156+ struct hash_desc desc;
79157+ struct scatterlist sg[2];
79158+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79159+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79160+ unsigned long *sumptr = (unsigned long *)sum;
79161+ int cryptres;
79162+ int retval = 1;
79163+ volatile int mismatched = 0;
79164+ volatile int dummy = 0;
79165+ unsigned int i;
79166+
79167+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79168+ if (IS_ERR(tfm)) {
79169+ /* should never happen, since sha256 should be built in */
79170+ memset(entry->pw, 0, GR_PW_LEN);
79171+ return 1;
79172+ }
79173+
79174+ sg_init_table(sg, 2);
79175+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79176+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79177+
79178+ desc.tfm = tfm;
79179+ desc.flags = 0;
79180+
79181+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79182+ temp_sum);
79183+
79184+ memset(entry->pw, 0, GR_PW_LEN);
79185+
79186+ if (cryptres)
79187+ goto out;
79188+
79189+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79190+ if (sumptr[i] != tmpsumptr[i])
79191+ mismatched = 1;
79192+ else
79193+			dummy = 1; // waste a cycle so both branches cost the same
79194+
79195+ if (!mismatched)
79196+ retval = dummy - 1;
79197+
79198+out:
79199+ crypto_free_hash(tfm);
79200+
79201+ return retval;
79202+}
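
chkpw() compares the salted SHA-256 of the supplied password against the stored sum by scanning every word and toggling a volatile dummy on matches, so the loop's timing does not reveal how many leading words were correct. The sketch below shows the same constant-time idea in the more common XOR-accumulate form, which is not the patch's word-compare-with-dummy but is equivalent in intent.

#include <stdio.h>

#define SHA_LEN 32	/* GR_SHA_LEN */

/* same goal as the loop in chkpw(): examine every byte even after a
 * mismatch so comparison time does not leak the matching prefix */
static int digests_match(const unsigned char *a, const unsigned char *b)
{
	unsigned char diff = 0;
	int i;

	for (i = 0; i < SHA_LEN; i++)
		diff |= (unsigned char)(a[i] ^ b[i]);
	return diff == 0;
}

int main(void)
{
	unsigned char x[SHA_LEN] = { 1 }, y[SHA_LEN] = { 1 };

	printf("match: %d\n", digests_match(x, y));	/* 1 */
	y[SHA_LEN - 1] ^= 0xff;
	printf("match: %d\n", digests_match(x, y));	/* 0 */
	return 0;
}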
79203diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79204index 77ff547..181834f 100644
79205--- a/include/asm-generic/4level-fixup.h
79206+++ b/include/asm-generic/4level-fixup.h
79207@@ -13,8 +13,10 @@
79208 #define pmd_alloc(mm, pud, address) \
79209 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79210 NULL: pmd_offset(pud, address))
79211+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79212
79213 #define pud_alloc(mm, pgd, address) (pgd)
79214+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79215 #define pud_offset(pgd, start) (pgd)
79216 #define pud_none(pud) 0
79217 #define pud_bad(pud) 0
79218diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79219index b7babf0..1e4b4f1 100644
79220--- a/include/asm-generic/atomic-long.h
79221+++ b/include/asm-generic/atomic-long.h
79222@@ -22,6 +22,12 @@
79223
79224 typedef atomic64_t atomic_long_t;
79225
79226+#ifdef CONFIG_PAX_REFCOUNT
79227+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79228+#else
79229+typedef atomic64_t atomic_long_unchecked_t;
79230+#endif
79231+
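
The typedef split just added is the heart of PAX_REFCOUNT: checked atomic_long_t operations are instrumented to detect overflow, while atomic_long_unchecked_t deliberately opts out for counters where wrapping is benign, and every *_unchecked wrapper that follows is then mechanical. A userspace illustration of the split using the GCC __atomic builtins; the checked/trapping behavior is assumed, not implemented.

#include <stdio.h>

/* userspace picture of the split: the checked type is assumed to be
 * instrumented against overflow, the _unchecked type deliberately
 * wraps (e.g. for statistics counters) */
typedef struct { volatile long counter; } atomic_long_t;
typedef struct { volatile long counter; } atomic_long_unchecked_t;

static long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
{
	/* plain wrapping add; a checked variant would trap on overflow */
	return __atomic_add_fetch(&l->counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_long_unchecked_t stats = { 0 };

	printf("%ld\n", atomic_long_add_return_unchecked(5, &stats));
	return 0;
}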
79232 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79233
79234 static inline long atomic_long_read(atomic_long_t *l)
79235@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79236 return (long)atomic64_read(v);
79237 }
79238
79239+#ifdef CONFIG_PAX_REFCOUNT
79240+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79241+{
79242+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79243+
79244+ return (long)atomic64_read_unchecked(v);
79245+}
79246+#endif
79247+
79248 static inline void atomic_long_set(atomic_long_t *l, long i)
79249 {
79250 atomic64_t *v = (atomic64_t *)l;
79251@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79252 atomic64_set(v, i);
79253 }
79254
79255+#ifdef CONFIG_PAX_REFCOUNT
79256+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79257+{
79258+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79259+
79260+ atomic64_set_unchecked(v, i);
79261+}
79262+#endif
79263+
79264 static inline void atomic_long_inc(atomic_long_t *l)
79265 {
79266 atomic64_t *v = (atomic64_t *)l;
79267@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79268 atomic64_inc(v);
79269 }
79270
79271+#ifdef CONFIG_PAX_REFCOUNT
79272+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79273+{
79274+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79275+
79276+ atomic64_inc_unchecked(v);
79277+}
79278+#endif
79279+
79280 static inline void atomic_long_dec(atomic_long_t *l)
79281 {
79282 atomic64_t *v = (atomic64_t *)l;
79283@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79284 atomic64_dec(v);
79285 }
79286
79287+#ifdef CONFIG_PAX_REFCOUNT
79288+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79289+{
79290+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79291+
79292+ atomic64_dec_unchecked(v);
79293+}
79294+#endif
79295+
79296 static inline void atomic_long_add(long i, atomic_long_t *l)
79297 {
79298 atomic64_t *v = (atomic64_t *)l;
79299@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79300 atomic64_add(i, v);
79301 }
79302
79303+#ifdef CONFIG_PAX_REFCOUNT
79304+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79305+{
79306+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79307+
79308+ atomic64_add_unchecked(i, v);
79309+}
79310+#endif
79311+
79312 static inline void atomic_long_sub(long i, atomic_long_t *l)
79313 {
79314 atomic64_t *v = (atomic64_t *)l;
79315@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79316 atomic64_sub(i, v);
79317 }
79318
79319+#ifdef CONFIG_PAX_REFCOUNT
79320+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79321+{
79322+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79323+
79324+ atomic64_sub_unchecked(i, v);
79325+}
79326+#endif
79327+
79328 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79329 {
79330 atomic64_t *v = (atomic64_t *)l;
79331@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79332 return atomic64_add_negative(i, v);
79333 }
79334
79335-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79336+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79337 {
79338 atomic64_t *v = (atomic64_t *)l;
79339
79340 return (long)atomic64_add_return(i, v);
79341 }
79342
79343+#ifdef CONFIG_PAX_REFCOUNT
79344+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79345+{
79346+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79347+
79348+ return (long)atomic64_add_return_unchecked(i, v);
79349+}
79350+#endif
79351+
79352 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79353 {
79354 atomic64_t *v = (atomic64_t *)l;
79355@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79356 return (long)atomic64_inc_return(v);
79357 }
79358
79359+#ifdef CONFIG_PAX_REFCOUNT
79360+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79361+{
79362+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79363+
79364+ return (long)atomic64_inc_return_unchecked(v);
79365+}
79366+#endif
79367+
79368 static inline long atomic_long_dec_return(atomic_long_t *l)
79369 {
79370 atomic64_t *v = (atomic64_t *)l;
79371@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79372
79373 typedef atomic_t atomic_long_t;
79374
79375+#ifdef CONFIG_PAX_REFCOUNT
79376+typedef atomic_unchecked_t atomic_long_unchecked_t;
79377+#else
79378+typedef atomic_t atomic_long_unchecked_t;
79379+#endif
79380+
79381 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79382 static inline long atomic_long_read(atomic_long_t *l)
79383 {
79384@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79385 return (long)atomic_read(v);
79386 }
79387
79388+#ifdef CONFIG_PAX_REFCOUNT
79389+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79390+{
79391+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79392+
79393+ return (long)atomic_read_unchecked(v);
79394+}
79395+#endif
79396+
79397 static inline void atomic_long_set(atomic_long_t *l, long i)
79398 {
79399 atomic_t *v = (atomic_t *)l;
79400@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79401 atomic_set(v, i);
79402 }
79403
79404+#ifdef CONFIG_PAX_REFCOUNT
79405+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79406+{
79407+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79408+
79409+ atomic_set_unchecked(v, i);
79410+}
79411+#endif
79412+
79413 static inline void atomic_long_inc(atomic_long_t *l)
79414 {
79415 atomic_t *v = (atomic_t *)l;
79416@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79417 atomic_inc(v);
79418 }
79419
79420+#ifdef CONFIG_PAX_REFCOUNT
79421+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79422+{
79423+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79424+
79425+ atomic_inc_unchecked(v);
79426+}
79427+#endif
79428+
79429 static inline void atomic_long_dec(atomic_long_t *l)
79430 {
79431 atomic_t *v = (atomic_t *)l;
79432@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79433 atomic_dec(v);
79434 }
79435
79436+#ifdef CONFIG_PAX_REFCOUNT
79437+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79438+{
79439+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79440+
79441+ atomic_dec_unchecked(v);
79442+}
79443+#endif
79444+
79445 static inline void atomic_long_add(long i, atomic_long_t *l)
79446 {
79447 atomic_t *v = (atomic_t *)l;
79448@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79449 atomic_add(i, v);
79450 }
79451
79452+#ifdef CONFIG_PAX_REFCOUNT
79453+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79454+{
79455+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79456+
79457+ atomic_add_unchecked(i, v);
79458+}
79459+#endif
79460+
79461 static inline void atomic_long_sub(long i, atomic_long_t *l)
79462 {
79463 atomic_t *v = (atomic_t *)l;
79464@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79465 atomic_sub(i, v);
79466 }
79467
79468+#ifdef CONFIG_PAX_REFCOUNT
79469+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79470+{
79471+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79472+
79473+ atomic_sub_unchecked(i, v);
79474+}
79475+#endif
79476+
79477 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79478 {
79479 atomic_t *v = (atomic_t *)l;
79480@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79481 return atomic_add_negative(i, v);
79482 }
79483
79484-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79485+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79486 {
79487 atomic_t *v = (atomic_t *)l;
79488
79489 return (long)atomic_add_return(i, v);
79490 }
79491
79492+#ifdef CONFIG_PAX_REFCOUNT
79493+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79494+{
79495+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79496+
79497+ return (long)atomic_add_return_unchecked(i, v);
79498+}
79499+
79500+#endif
79501+
79502 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79503 {
79504 atomic_t *v = (atomic_t *)l;
79505@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79506 return (long)atomic_inc_return(v);
79507 }
79508
79509+#ifdef CONFIG_PAX_REFCOUNT
79510+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79511+{
79512+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79513+
79514+ return (long)atomic_inc_return_unchecked(v);
79515+}
79516+#endif
79517+
79518 static inline long atomic_long_dec_return(atomic_long_t *l)
79519 {
79520 atomic_t *v = (atomic_t *)l;
79521@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79522
79523 #endif /* BITS_PER_LONG == 64 */
79524
79525+#ifdef CONFIG_PAX_REFCOUNT
79526+static inline void pax_refcount_needs_these_functions(void)
79527+{
79528+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79529+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79530+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79531+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79532+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79533+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79534+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79535+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79536+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79537+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79538+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79539+#ifdef CONFIG_X86
79540+ atomic_clear_mask_unchecked(0, NULL);
79541+ atomic_set_mask_unchecked(0, NULL);
79542+#endif
79543+
79544+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79545+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79546+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79547+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79548+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79549+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79550+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79551+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79552+}
79553+#else
79554+#define atomic_read_unchecked(v) atomic_read(v)
79555+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79556+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79557+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79558+#define atomic_inc_unchecked(v) atomic_inc(v)
79559+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79560+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79561+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79562+#define atomic_dec_unchecked(v) atomic_dec(v)
79563+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79564+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79565+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79566+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79567+
79568+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79569+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79570+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79571+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79572+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79573+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79574+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79575+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79576+#endif
79577+
79578 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
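
The parallel _unchecked API above exists because PAX_REFCOUNT instruments the ordinary atomic operations with overflow detection, while counters that may legitimately wrap (statistics, sequence numbers) are moved to the _unchecked variants and keep plain wrapping semantics. A minimal userspace model of that split follows; refcount_inc_checked and counter_inc_unchecked are invented names for illustration, not the kernel implementation.

#include <limits.h>
#include <stdio.h>

/* Checked increment: detect and refuse the wrap, as PAX_REFCOUNT
 * does for true reference counts. */
static int refcount_inc_checked(int *v)
{
	if (*v == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		return -1;
	}
	return ++*v;
}

/* Unchecked increment: wrapping is acceptable, so use a type where
 * wrap-around is well defined. */
static void counter_inc_unchecked(unsigned int *v)
{
	++*v;
}

int main(void)
{
	int ref = INT_MAX;
	unsigned int stat = UINT_MAX;

	refcount_inc_checked(&ref);	/* caught, ref stays at INT_MAX */
	counter_inc_unchecked(&stat);	/* wraps to 0, by design */
	printf("ref=%d stat=%u\n", ref, stat);
	return 0;
}
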
79579diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79580index 30ad9c8..c70c170 100644
79581--- a/include/asm-generic/atomic64.h
79582+++ b/include/asm-generic/atomic64.h
79583@@ -16,6 +16,8 @@ typedef struct {
79584 long long counter;
79585 } atomic64_t;
79586
79587+typedef atomic64_t atomic64_unchecked_t;
79588+
79589 #define ATOMIC64_INIT(i) { (i) }
79590
79591 extern long long atomic64_read(const atomic64_t *v);
79592@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79593 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79594 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79595
79596+#define atomic64_read_unchecked(v) atomic64_read(v)
79597+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79598+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79599+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79600+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79601+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79602+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79603+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79604+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79605+
79606 #endif /* _ASM_GENERIC_ATOMIC64_H */
79607diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79608index f5c40b0..e902f9d 100644
79609--- a/include/asm-generic/barrier.h
79610+++ b/include/asm-generic/barrier.h
79611@@ -82,7 +82,7 @@
79612 do { \
79613 compiletime_assert_atomic_type(*p); \
79614 smp_mb(); \
79615- ACCESS_ONCE(*p) = (v); \
79616+ ACCESS_ONCE_RW(*p) = (v); \
79617 } while (0)
79618
79619 #define smp_load_acquire(p) \
79620diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79621index a60a7cc..0fe12f2 100644
79622--- a/include/asm-generic/bitops/__fls.h
79623+++ b/include/asm-generic/bitops/__fls.h
79624@@ -9,7 +9,7 @@
79625 *
79626 * Undefined if no set bit exists, so code should check against 0 first.
79627 */
79628-static __always_inline unsigned long __fls(unsigned long word)
79629+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79630 {
79631 int num = BITS_PER_LONG - 1;
79632
79633diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79634index 0576d1f..dad6c71 100644
79635--- a/include/asm-generic/bitops/fls.h
79636+++ b/include/asm-generic/bitops/fls.h
79637@@ -9,7 +9,7 @@
79638 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79639 */
79640
79641-static __always_inline int fls(int x)
79642+static __always_inline int __intentional_overflow(-1) fls(int x)
79643 {
79644 int r = 32;
79645
79646diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79647index b097cf8..3d40e14 100644
79648--- a/include/asm-generic/bitops/fls64.h
79649+++ b/include/asm-generic/bitops/fls64.h
79650@@ -15,7 +15,7 @@
79651 * at position 64.
79652 */
79653 #if BITS_PER_LONG == 32
79654-static __always_inline int fls64(__u64 x)
79655+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79656 {
79657 __u32 h = x >> 32;
79658 if (h)
79659@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79660 return fls(x);
79661 }
79662 #elif BITS_PER_LONG == 64
79663-static __always_inline int fls64(__u64 x)
79664+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79665 {
79666 if (x == 0)
79667 return 0;
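
The __intentional_overflow(-1) annotation, used here and on many functions below, exempts a function from the size-overflow plugin: the bit shifting inside fls()/fls64() deliberately moves values around in ways the plugin would otherwise flag. The annotated functions keep their documented behaviour; for reference, a straightforward (unoptimised) model of fls() that matches the comment in the hunk above:

/* fls(): 1-based index of the most significant set bit, fls(0) == 0.
 * Matches: fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */
static int fls_ref(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}
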
79668diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79669index 1bfcfe5..e04c5c9 100644
79670--- a/include/asm-generic/cache.h
79671+++ b/include/asm-generic/cache.h
79672@@ -6,7 +6,7 @@
79673 * cache lines need to provide their own cache.h.
79674 */
79675
79676-#define L1_CACHE_SHIFT 5
79677-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79678+#define L1_CACHE_SHIFT 5UL
79679+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79680
79681 #endif /* __ASM_GENERIC_CACHE_H */
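
Making L1_CACHE_SHIFT and L1_CACHE_BYTES unsigned long is a type-hygiene fix: other constants are derived from these macros by shifting and masking, and an int-typed 1 silently limits such derived expressions to 32-bit arithmetic. A two-line illustration of the general hazard (the shift of 5 itself is harmless; the point is what happens once the macros feed larger shifts):

unsigned long bad  = 1   << 33;	/* undefined: shift exceeds int width */
unsigned long good = 1UL << 33;	/* well defined on LP64: 0x200000000 */
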
79682diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79683index 0d68a1e..b74a761 100644
79684--- a/include/asm-generic/emergency-restart.h
79685+++ b/include/asm-generic/emergency-restart.h
79686@@ -1,7 +1,7 @@
79687 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79688 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79689
79690-static inline void machine_emergency_restart(void)
79691+static inline __noreturn void machine_emergency_restart(void)
79692 {
79693 machine_restart(NULL);
79694 }
79695diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79696index 90f99c7..00ce236 100644
79697--- a/include/asm-generic/kmap_types.h
79698+++ b/include/asm-generic/kmap_types.h
79699@@ -2,9 +2,9 @@
79700 #define _ASM_GENERIC_KMAP_TYPES_H
79701
79702 #ifdef __WITH_KM_FENCE
79703-# define KM_TYPE_NR 41
79704+# define KM_TYPE_NR 42
79705 #else
79706-# define KM_TYPE_NR 20
79707+# define KM_TYPE_NR 21
79708 #endif
79709
79710 #endif
79711diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79712index 9ceb03b..62b0b8f 100644
79713--- a/include/asm-generic/local.h
79714+++ b/include/asm-generic/local.h
79715@@ -23,24 +23,37 @@ typedef struct
79716 atomic_long_t a;
79717 } local_t;
79718
79719+typedef struct {
79720+ atomic_long_unchecked_t a;
79721+} local_unchecked_t;
79722+
79723 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79724
79725 #define local_read(l) atomic_long_read(&(l)->a)
79726+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79727 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79728+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79729 #define local_inc(l) atomic_long_inc(&(l)->a)
79730+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79731 #define local_dec(l) atomic_long_dec(&(l)->a)
79732+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79733 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79734+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79735 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79736+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79737
79738 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79739 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79740 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79741 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79742 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79743+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79744 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79745 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79746+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79747
79748 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79749+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79750 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79751 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79752 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
79753diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79754index 725612b..9cc513a 100644
79755--- a/include/asm-generic/pgtable-nopmd.h
79756+++ b/include/asm-generic/pgtable-nopmd.h
79757@@ -1,14 +1,19 @@
79758 #ifndef _PGTABLE_NOPMD_H
79759 #define _PGTABLE_NOPMD_H
79760
79761-#ifndef __ASSEMBLY__
79762-
79763 #include <asm-generic/pgtable-nopud.h>
79764
79765-struct mm_struct;
79766-
79767 #define __PAGETABLE_PMD_FOLDED
79768
79769+#define PMD_SHIFT PUD_SHIFT
79770+#define PTRS_PER_PMD 1
79771+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79772+#define PMD_MASK (~(PMD_SIZE-1))
79773+
79774+#ifndef __ASSEMBLY__
79775+
79776+struct mm_struct;
79777+
79778 /*
79779 * Having the pmd type consist of a pud gets the size right, and allows
79780 * us to conceptually access the pud entry that this pmd is folded into
79781@@ -16,11 +21,6 @@ struct mm_struct;
79782 */
79783 typedef struct { pud_t pud; } pmd_t;
79784
79785-#define PMD_SHIFT PUD_SHIFT
79786-#define PTRS_PER_PMD 1
79787-#define PMD_SIZE (1UL << PMD_SHIFT)
79788-#define PMD_MASK (~(PMD_SIZE-1))
79789-
79790 /*
79791 * The "pud_xxx()" functions here are trivial for a folded two-level
79792 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79793diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79794index 810431d..0ec4804f 100644
79795--- a/include/asm-generic/pgtable-nopud.h
79796+++ b/include/asm-generic/pgtable-nopud.h
79797@@ -1,10 +1,15 @@
79798 #ifndef _PGTABLE_NOPUD_H
79799 #define _PGTABLE_NOPUD_H
79800
79801-#ifndef __ASSEMBLY__
79802-
79803 #define __PAGETABLE_PUD_FOLDED
79804
79805+#define PUD_SHIFT PGDIR_SHIFT
79806+#define PTRS_PER_PUD 1
79807+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79808+#define PUD_MASK (~(PUD_SIZE-1))
79809+
79810+#ifndef __ASSEMBLY__
79811+
79812 /*
79813 * Having the pud type consist of a pgd gets the size right, and allows
79814 * us to conceptually access the pgd entry that this pud is folded into
79815@@ -12,11 +17,6 @@
79816 */
79817 typedef struct { pgd_t pgd; } pud_t;
79818
79819-#define PUD_SHIFT PGDIR_SHIFT
79820-#define PTRS_PER_PUD 1
79821-#define PUD_SIZE (1UL << PUD_SHIFT)
79822-#define PUD_MASK (~(PUD_SIZE-1))
79823-
79824 /*
79825 * The "pgd_xxx()" functions here are trivial for a folded two-level
79826 * setup: the pud is never bad, and a pud always exists (as it's folded
79827@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79828 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79829
79830 #define pgd_populate(mm, pgd, pud) do { } while (0)
79831+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79832 /*
79833 * (puds are folded into pgds so this doesn't get actually called,
79834 * but the define is needed for a generic inline function.)
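
Both the pgtable-nopmd.h and pgtable-nopud.h hunks move the PMD_*/PUD_* constants in front of #ifndef __ASSEMBLY__ so assembly sources can use them, which is also why the plain 1UL literal becomes _AC(1,UL): the UL suffix is valid C but not valid assembler. _AC (from include/uapi/linux/const.h) pastes the suffix only when compiling C:

/* Simplified from include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees a bare 1 */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees 1UL */
#endif
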
79835diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79836index 177d597..2826237 100644
79837--- a/include/asm-generic/pgtable.h
79838+++ b/include/asm-generic/pgtable.h
79839@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79840 }
79841 #endif /* CONFIG_NUMA_BALANCING */
79842
79843+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79844+#ifdef CONFIG_PAX_KERNEXEC
79845+#error KERNEXEC requires pax_open_kernel
79846+#else
79847+static inline unsigned long pax_open_kernel(void) { return 0; }
79848+#endif
79849+#endif
79850+
79851+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79852+#ifdef CONFIG_PAX_KERNEXEC
79853+#error KERNEXEC requires pax_close_kernel
79854+#else
79855+static inline unsigned long pax_close_kernel(void) { return 0; }
79856+#endif
79857+#endif
79858+
79859 #endif /* CONFIG_MMU */
79860
79861 #endif /* !__ASSEMBLY__ */
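
These stubs define the contract for PAX_KERNEXEC: data that is normally mapped read-only may be written only inside a pax_open_kernel()/pax_close_kernel() window, and an architecture enabling KERNEXEC must supply real implementations (hence the #error). A sketch of the resulting call pattern, assuming an arch that provides the hooks; update_ro_slot is an invented example, not a kernel function:

static void update_ro_slot(unsigned long *slot, unsigned long val)
{
	pax_open_kernel();	/* temporarily allow writes to r/o kernel data */
	*slot = val;
	pax_close_kernel();	/* restore the write protection */
}
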
79862diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79863index 72d8803..cb9749c 100644
79864--- a/include/asm-generic/uaccess.h
79865+++ b/include/asm-generic/uaccess.h
79866@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79867 return __clear_user(to, n);
79868 }
79869
79870+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79871+#ifdef CONFIG_PAX_MEMORY_UDEREF
79872+#error UDEREF requires pax_open_userland
79873+#else
79874+static inline unsigned long pax_open_userland(void) { return 0; }
79875+#endif
79876+#endif
79877+
79878+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79879+#ifdef CONFIG_PAX_MEMORY_UDEREF
79880+#error UDEREF requires pax_close_userland
79881+#else
79882+static inline unsigned long pax_close_userland(void) { return 0; }
79883+#endif
79884+#endif
79885+
79886 #endif /* __ASM_GENERIC_UACCESS_H */
79887diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79888index bee5d68..8d362d1 100644
79889--- a/include/asm-generic/vmlinux.lds.h
79890+++ b/include/asm-generic/vmlinux.lds.h
79891@@ -234,6 +234,7 @@
79892 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79893 VMLINUX_SYMBOL(__start_rodata) = .; \
79894 *(.rodata) *(.rodata.*) \
79895+ *(.data..read_only) \
79896 *(__vermagic) /* Kernel version magic */ \
79897 . = ALIGN(8); \
79898 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79899@@ -726,17 +727,18 @@
79900 * section in the linker script will go there too. @phdr should have
79901 * a leading colon.
79902 *
79903- * Note that this macros defines __per_cpu_load as an absolute symbol.
79904+ * Note that this macro defines per_cpu_load as an absolute symbol.
79905 * If there is no need to put the percpu section at a predetermined
79906 * address, use PERCPU_SECTION.
79907 */
79908 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79909- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79910- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79911+ per_cpu_load = .; \
79912+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79913 - LOAD_OFFSET) { \
79914+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79915 PERCPU_INPUT(cacheline) \
79916 } phdr \
79917- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79918+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79919
79920 /**
79921 * PERCPU_SECTION - define output section for percpu area, simple version
79922diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
79923index 623a59c..1e79ab9 100644
79924--- a/include/crypto/algapi.h
79925+++ b/include/crypto/algapi.h
79926@@ -34,7 +34,7 @@ struct crypto_type {
79927 unsigned int maskclear;
79928 unsigned int maskset;
79929 unsigned int tfmsize;
79930-};
79931+} __do_const;
79932
79933 struct crypto_instance {
79934 struct crypto_alg alg;
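
This is the first of many __do_const annotations in the patch: under the constify plugin, structures consisting of function pointers and configuration flags are forced const, so the resulting ops tables land in read-only memory and cannot be rewritten by an attacker. Where a rare instance genuinely needs runtime initialisation, a __no_const typedef opts it back out. Conceptually (a sketch of the intent, not the plugin's mechanics):

struct ops {
	int (*handler)(void *data);
} __do_const;				/* instances become read-only */

typedef struct ops __no_const ops_no_const;	/* writable escape hatch */
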
79935diff --git a/include/drm/drmP.h b/include/drm/drmP.h
79936index e1b2e8b..2697bd2 100644
79937--- a/include/drm/drmP.h
79938+++ b/include/drm/drmP.h
79939@@ -59,6 +59,7 @@
79940
79941 #include <asm/mman.h>
79942 #include <asm/pgalloc.h>
79943+#include <asm/local.h>
79944 #include <asm/uaccess.h>
79945
79946 #include <uapi/drm/drm.h>
79947@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
79948 * \param cmd command.
79949 * \param arg argument.
79950 */
79951-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
79952+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
79953+ struct drm_file *file_priv);
79954+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
79955 struct drm_file *file_priv);
79956
79957-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79958+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
79959 unsigned long arg);
79960
79961 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79962@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79963 struct drm_ioctl_desc {
79964 unsigned int cmd;
79965 int flags;
79966- drm_ioctl_t *func;
79967+ drm_ioctl_t func;
79968 unsigned int cmd_drv;
79969 const char *name;
79970-};
79971+} __do_const;
79972
79973 /**
79974 * Creates a driver or general drm_ioctl_desc array entry for the given
79975@@ -629,7 +632,8 @@ struct drm_info_list {
79976 int (*show)(struct seq_file*, void*); /** show callback */
79977 u32 driver_features; /**< Required driver features for this entry */
79978 void *data;
79979-};
79980+} __do_const;
79981+typedef struct drm_info_list __no_const drm_info_list_no_const;
79982
79983 /**
79984 * debugfs node structure. This structure represents a debugfs file.
79985@@ -713,7 +717,7 @@ struct drm_device {
79986
79987 /** \name Usage Counters */
79988 /*@{ */
79989- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79990+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79991 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
79992 int buf_use; /**< Buffers in use -- cannot alloc */
79993 atomic_t buf_alloc; /**< Buffer allocation in progress */
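
Converting open_count from a plain int to local_t means every call site must switch from direct arithmetic to the local_* accessors, which is why <asm/local.h> is added to the includes above. The resulting pattern at the existing call sites looks roughly like this (a sketch; the real call sites live in drm_fops.c):

local_inc(&dev->open_count);		/* was: dev->open_count++ */
if (local_read(&dev->open_count) == 1)
	retcode = drm_setup(dev);	/* first opener initialises */
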
79994diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
79995index 7adbb65..2a1eb1f 100644
79996--- a/include/drm/drm_crtc_helper.h
79997+++ b/include/drm/drm_crtc_helper.h
79998@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
79999 struct drm_connector *connector);
80000 /* disable encoder when not in use - more explicit than dpms off */
80001 void (*disable)(struct drm_encoder *encoder);
80002-};
80003+} __no_const;
80004
80005 /**
80006 * drm_connector_helper_funcs - helper operations for connectors
80007diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80008index d016dc5..3951fe0 100644
80009--- a/include/drm/i915_pciids.h
80010+++ b/include/drm/i915_pciids.h
80011@@ -37,7 +37,7 @@
80012 */
80013 #define INTEL_VGA_DEVICE(id, info) { \
80014 0x8086, id, \
80015- ~0, ~0, \
80016+ PCI_ANY_ID, PCI_ANY_ID, \
80017 0x030000, 0xff0000, \
80018 (unsigned long) info }
80019
80020diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80021index 72dcbe8..8db58d7 100644
80022--- a/include/drm/ttm/ttm_memory.h
80023+++ b/include/drm/ttm/ttm_memory.h
80024@@ -48,7 +48,7 @@
80025
80026 struct ttm_mem_shrink {
80027 int (*do_shrink) (struct ttm_mem_shrink *);
80028-};
80029+} __no_const;
80030
80031 /**
80032 * struct ttm_mem_global - Global memory accounting structure.
80033diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80034index 49a8284..9643967 100644
80035--- a/include/drm/ttm/ttm_page_alloc.h
80036+++ b/include/drm/ttm/ttm_page_alloc.h
80037@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80038 */
80039 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80040
80041+struct device;
80042 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80043 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80044
80045diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80046index 4b840e8..155d235 100644
80047--- a/include/keys/asymmetric-subtype.h
80048+++ b/include/keys/asymmetric-subtype.h
80049@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80050 /* Verify the signature on a key of this subtype (optional) */
80051 int (*verify_signature)(const struct key *key,
80052 const struct public_key_signature *sig);
80053-};
80054+} __do_const;
80055
80056 /**
80057 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80058diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80059index c1da539..1dcec55 100644
80060--- a/include/linux/atmdev.h
80061+++ b/include/linux/atmdev.h
80062@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80063 #endif
80064
80065 struct k_atm_aal_stats {
80066-#define __HANDLE_ITEM(i) atomic_t i
80067+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80068 __AAL_STAT_ITEMS
80069 #undef __HANDLE_ITEM
80070 };
80071@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80072 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80073 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80074 struct module *owner;
80075-};
80076+} __do_const;
80077
80078 struct atmphy_ops {
80079 int (*start)(struct atm_dev *dev);
80080diff --git a/include/linux/atomic.h b/include/linux/atomic.h
80081index 5b08a85..60922fb 100644
80082--- a/include/linux/atomic.h
80083+++ b/include/linux/atomic.h
80084@@ -12,7 +12,7 @@
80085 * Atomically adds @a to @v, so long as @v was not already @u.
80086 * Returns non-zero if @v was not @u, and zero otherwise.
80087 */
80088-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80089+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80090 {
80091 return __atomic_add_unless(v, a, u) != u;
80092 }
80093diff --git a/include/linux/audit.h b/include/linux/audit.h
80094index af84234..4177a40 100644
80095--- a/include/linux/audit.h
80096+++ b/include/linux/audit.h
80097@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
80098 extern unsigned int audit_serial(void);
80099 extern int auditsc_get_stamp(struct audit_context *ctx,
80100 struct timespec *t, unsigned int *serial);
80101-extern int audit_set_loginuid(kuid_t loginuid);
80102+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80103
80104 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80105 {
80106diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80107index 576e463..28fd926 100644
80108--- a/include/linux/binfmts.h
80109+++ b/include/linux/binfmts.h
80110@@ -44,7 +44,7 @@ struct linux_binprm {
80111 unsigned interp_flags;
80112 unsigned interp_data;
80113 unsigned long loader, exec;
80114-};
80115+} __randomize_layout;
80116
80117 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80118 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80119@@ -77,8 +77,10 @@ struct linux_binfmt {
80120 int (*load_binary)(struct linux_binprm *);
80121 int (*load_shlib)(struct file *);
80122 int (*core_dump)(struct coredump_params *cprm);
80123+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80124+ void (*handle_mmap)(struct file *);
80125 unsigned long min_coredump; /* minimal dump size */
80126-};
80127+} __do_const __randomize_layout;
80128
80129 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80130
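
__randomize_layout feeds the RANDSTRUCT plugin (wired up in the compiler-gcc4.h hunk below): member order in annotated structs is shuffled with a per-build seed, so an exploit can no longer rely on the offset of, say, a credential pointer inside a sensitive structure. An illustration of the annotation on a hypothetical struct:

struct session_info {
	unsigned int uid;
	void *security;
	char comm[16];
} __randomize_layout;	/* offsetof() of members differs per build */
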
80131diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80132index 202e403..16e6617 100644
80133--- a/include/linux/bitmap.h
80134+++ b/include/linux/bitmap.h
80135@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80136 return __bitmap_full(src, nbits);
80137 }
80138
80139-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80140+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80141 {
80142 if (small_const_nbits(nbits))
80143 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80144diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80145index 5d858e0..336c1d9 100644
80146--- a/include/linux/bitops.h
80147+++ b/include/linux/bitops.h
80148@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80149 * @word: value to rotate
80150 * @shift: bits to roll
80151 */
80152-static inline __u32 rol32(__u32 word, unsigned int shift)
80153+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80154 {
80155 return (word << shift) | (word >> (32 - shift));
80156 }
80157@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80158 * @word: value to rotate
80159 * @shift: bits to roll
80160 */
80161-static inline __u32 ror32(__u32 word, unsigned int shift)
80162+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80163 {
80164 return (word >> shift) | (word << (32 - shift));
80165 }
80166@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80167 return (__s32)(value << shift) >> shift;
80168 }
80169
80170-static inline unsigned fls_long(unsigned long l)
80171+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80172 {
80173 if (sizeof(l) == 4)
80174 return fls(l);
80175diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80176index 92f4b4b..483d537 100644
80177--- a/include/linux/blkdev.h
80178+++ b/include/linux/blkdev.h
80179@@ -1613,7 +1613,7 @@ struct block_device_operations {
80180 /* this callback is with swap_lock and sometimes page table lock held */
80181 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80182 struct module *owner;
80183-};
80184+} __do_const;
80185
80186 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80187 unsigned long);
80188diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80189index afc1343..9735539 100644
80190--- a/include/linux/blktrace_api.h
80191+++ b/include/linux/blktrace_api.h
80192@@ -25,7 +25,7 @@ struct blk_trace {
80193 struct dentry *dropped_file;
80194 struct dentry *msg_file;
80195 struct list_head running_list;
80196- atomic_t dropped;
80197+ atomic_unchecked_t dropped;
80198 };
80199
80200 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80201diff --git a/include/linux/cache.h b/include/linux/cache.h
80202index 17e7e82..1d7da26 100644
80203--- a/include/linux/cache.h
80204+++ b/include/linux/cache.h
80205@@ -16,6 +16,14 @@
80206 #define __read_mostly
80207 #endif
80208
80209+#ifndef __read_only
80210+#ifdef CONFIG_PAX_KERNEXEC
80211+#error KERNEXEC requires __read_only
80212+#else
80213+#define __read_only __read_mostly
80214+#endif
80215+#endif
80216+
80217 #ifndef ____cacheline_aligned
80218 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80219 #endif
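
__read_only complements the earlier linker-script hunk that folds .data..read_only into .rodata: data marked this way is initialised during boot and then enforced read-only under KERNEXEC, degrading to __read_mostly when the feature is off. A hypothetical definition along those lines (the real one comes from the architecture/plugin support, hence only the #error guard here):

#define __read_only __attribute__((__section__(".data..read_only")))

static int hardening_limit __read_only = 100;
/* set up early, then writable only inside pax_open_kernel() */
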
80220diff --git a/include/linux/capability.h b/include/linux/capability.h
80221index aa93e5e..985a1b0 100644
80222--- a/include/linux/capability.h
80223+++ b/include/linux/capability.h
80224@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80225 extern bool capable(int cap);
80226 extern bool ns_capable(struct user_namespace *ns, int cap);
80227 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80228+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80229 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80230+extern bool capable_nolog(int cap);
80231+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80232
80233 /* audit system wants to get cap info from files as well */
80234 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80235
80236+extern int is_privileged_binary(const struct dentry *dentry);
80237+
80238 #endif /* !_LINUX_CAPABILITY_H */
80239diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80240index 8609d57..86e4d79 100644
80241--- a/include/linux/cdrom.h
80242+++ b/include/linux/cdrom.h
80243@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80244
80245 /* driver specifications */
80246 const int capability; /* capability flags */
80247- int n_minors; /* number of active minor devices */
80248 /* handle uniform packets for scsi type devices (scsi,atapi) */
80249 int (*generic_packet) (struct cdrom_device_info *,
80250 struct packet_command *);
80251diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80252index 4ce9056..86caac6 100644
80253--- a/include/linux/cleancache.h
80254+++ b/include/linux/cleancache.h
80255@@ -31,7 +31,7 @@ struct cleancache_ops {
80256 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80257 void (*invalidate_inode)(int, struct cleancache_filekey);
80258 void (*invalidate_fs)(int);
80259-};
80260+} __no_const;
80261
80262 extern struct cleancache_ops *
80263 cleancache_register_ops(struct cleancache_ops *ops);
80264diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80265index d936409..ce9f842 100644
80266--- a/include/linux/clk-provider.h
80267+++ b/include/linux/clk-provider.h
80268@@ -191,6 +191,7 @@ struct clk_ops {
80269 void (*init)(struct clk_hw *hw);
80270 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80271 };
80272+typedef struct clk_ops __no_const clk_ops_no_const;
80273
80274 /**
80275 * struct clk_init_data - holds init data that's common to all clocks and is
80276diff --git a/include/linux/compat.h b/include/linux/compat.h
80277index 7450ca2..a824b81 100644
80278--- a/include/linux/compat.h
80279+++ b/include/linux/compat.h
80280@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80281 compat_size_t __user *len_ptr);
80282
80283 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80284-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80285+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80286 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80287 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80288 compat_ssize_t msgsz, int msgflg);
80289@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80290 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80291 compat_ulong_t addr, compat_ulong_t data);
80292 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80293- compat_long_t addr, compat_long_t data);
80294+ compat_ulong_t addr, compat_ulong_t data);
80295
80296 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80297 /*
80298diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80299index d1a5582..4424efa 100644
80300--- a/include/linux/compiler-gcc4.h
80301+++ b/include/linux/compiler-gcc4.h
80302@@ -39,9 +39,34 @@
80303 # define __compiletime_warning(message) __attribute__((warning(message)))
80304 # define __compiletime_error(message) __attribute__((error(message)))
80305 #endif /* __CHECKER__ */
80306+
80307+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80308+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80309+#define __bos0(ptr) __bos((ptr), 0)
80310+#define __bos1(ptr) __bos((ptr), 1)
80311 #endif /* GCC_VERSION >= 40300 */
80312
80313 #if GCC_VERSION >= 40500
80314+
80315+#ifdef RANDSTRUCT_PLUGIN
80316+#define __randomize_layout __attribute__((randomize_layout))
80317+#define __no_randomize_layout __attribute__((no_randomize_layout))
80318+#endif
80319+
80320+#ifdef CONSTIFY_PLUGIN
80321+#define __no_const __attribute__((no_const))
80322+#define __do_const __attribute__((do_const))
80323+#endif
80324+
80325+#ifdef SIZE_OVERFLOW_PLUGIN
80326+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80327+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80328+#endif
80329+
80330+#ifdef LATENT_ENTROPY_PLUGIN
80331+#define __latent_entropy __attribute__((latent_entropy))
80332+#endif
80333+
80334 /*
80335 * Mark a position in code as unreachable. This can be used to
80336 * suppress control flow warnings after asm blocks that transfer
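
__bos/__bos0/__bos1 wrap GCC's __builtin_object_size, which fortify-style checks use to learn at compile time how large the object behind a pointer is; __alloc_size(...) extends that knowledge to custom allocators by declaring which argument carries the size. Both are standard GCC facilities:

#include <stddef.h>

void *my_alloc(size_t n) __attribute__((alloc_size(1)));

static char buf[8];

size_t buf_size(void)
{
	return __builtin_object_size(buf, 0);	/* compile-time constant 8 */
}
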
80337diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80338index c8c5659..d09f2ad 100644
80339--- a/include/linux/compiler-gcc5.h
80340+++ b/include/linux/compiler-gcc5.h
80341@@ -28,6 +28,28 @@
80342 # define __compiletime_error(message) __attribute__((error(message)))
80343 #endif /* __CHECKER__ */
80344
80345+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80346+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80347+#define __bos0(ptr) __bos((ptr), 0)
80348+#define __bos1(ptr) __bos((ptr), 1)
80349+
80350+#ifdef CONSTIFY_PLUGIN
80351+#error not yet
80352+#define __no_const __attribute__((no_const))
80353+#define __do_const __attribute__((do_const))
80354+#endif
80355+
80356+#ifdef SIZE_OVERFLOW_PLUGIN
80357+#error not yet
80358+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80359+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80360+#endif
80361+
80362+#ifdef LATENT_ENTROPY_PLUGIN
80363+#error not yet
80364+#define __latent_entropy __attribute__((latent_entropy))
80365+#endif
80366+
80367 /*
80368 * Mark a position in code as unreachable. This can be used to
80369 * suppress control flow warnings after asm blocks that transfer
80370diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80371index fa6a314..752a6ef 100644
80372--- a/include/linux/compiler.h
80373+++ b/include/linux/compiler.h
80374@@ -5,11 +5,14 @@
80375
80376 #ifdef __CHECKER__
80377 # define __user __attribute__((noderef, address_space(1)))
80378+# define __force_user __force __user
80379 # define __kernel __attribute__((address_space(0)))
80380+# define __force_kernel __force __kernel
80381 # define __safe __attribute__((safe))
80382 # define __force __attribute__((force))
80383 # define __nocast __attribute__((nocast))
80384 # define __iomem __attribute__((noderef, address_space(2)))
80385+# define __force_iomem __force __iomem
80386 # define __must_hold(x) __attribute__((context(x,1,1)))
80387 # define __acquires(x) __attribute__((context(x,0,1)))
80388 # define __releases(x) __attribute__((context(x,1,0)))
80389@@ -17,20 +20,37 @@
80390 # define __release(x) __context__(x,-1)
80391 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80392 # define __percpu __attribute__((noderef, address_space(3)))
80393+# define __force_percpu __force __percpu
80394 #ifdef CONFIG_SPARSE_RCU_POINTER
80395 # define __rcu __attribute__((noderef, address_space(4)))
80396+# define __force_rcu __force __rcu
80397 #else
80398 # define __rcu
80399+# define __force_rcu
80400 #endif
80401 extern void __chk_user_ptr(const volatile void __user *);
80402 extern void __chk_io_ptr(const volatile void __iomem *);
80403 #else
80404-# define __user
80405-# define __kernel
80406+# ifdef CHECKER_PLUGIN
80407+//# define __user
80408+//# define __force_user
80409+//# define __kernel
80410+//# define __force_kernel
80411+# else
80412+# ifdef STRUCTLEAK_PLUGIN
80413+# define __user __attribute__((user))
80414+# else
80415+# define __user
80416+# endif
80417+# define __force_user
80418+# define __kernel
80419+# define __force_kernel
80420+# endif
80421 # define __safe
80422 # define __force
80423 # define __nocast
80424 # define __iomem
80425+# define __force_iomem
80426 # define __chk_user_ptr(x) (void)0
80427 # define __chk_io_ptr(x) (void)0
80428 # define __builtin_warning(x, y...) (1)
80429@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80430 # define __release(x) (void)0
80431 # define __cond_lock(x,c) (c)
80432 # define __percpu
80433+# define __force_percpu
80434 # define __rcu
80435+# define __force_rcu
80436 #endif
80437
80438 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80439@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80440 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80441 {
80442 switch (size) {
80443- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80444- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80445- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80446+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80447+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80448+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80449 #ifdef CONFIG_64BIT
80450- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80451+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80452 #endif
80453 default:
80454 barrier();
80455- __builtin_memcpy((void *)res, (const void *)p, size);
80456+ __builtin_memcpy(res, (const void *)p, size);
80457 data_access_exceeds_word_size();
80458 barrier();
80459 }
80460 }
80461
80462-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80463+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80464 {
80465 switch (size) {
80466- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80467- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80468- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80469+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80470+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80471+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80472 #ifdef CONFIG_64BIT
80473- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80474+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80475 #endif
80476 default:
80477 barrier();
80478- __builtin_memcpy((void *)p, (const void *)res, size);
80479+ __builtin_memcpy((void *)p, res, size);
80480 data_access_exceeds_word_size();
80481 barrier();
80482 }
80483@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80484 # define __attribute_const__ /* unimplemented */
80485 #endif
80486
80487+#ifndef __randomize_layout
80488+# define __randomize_layout
80489+#endif
80490+
80491+#ifndef __no_randomize_layout
80492+# define __no_randomize_layout
80493+#endif
80494+
80495+#ifndef __no_const
80496+# define __no_const
80497+#endif
80498+
80499+#ifndef __do_const
80500+# define __do_const
80501+#endif
80502+
80503+#ifndef __size_overflow
80504+# define __size_overflow(...)
80505+#endif
80506+
80507+#ifndef __intentional_overflow
80508+# define __intentional_overflow(...)
80509+#endif
80510+
80511+#ifndef __latent_entropy
80512+# define __latent_entropy
80513+#endif
80514+
80515 /*
80516 * Tell gcc if a function is cold. The compiler will assume any path
80517 * directly leading to the call is unlikely.
80518@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80519 #define __cold
80520 #endif
80521
80522+#ifndef __alloc_size
80523+#define __alloc_size(...)
80524+#endif
80525+
80526+#ifndef __bos
80527+#define __bos(ptr, arg)
80528+#endif
80529+
80530+#ifndef __bos0
80531+#define __bos0(ptr)
80532+#endif
80533+
80534+#ifndef __bos1
80535+#define __bos1(ptr)
80536+#endif
80537+
80538 /* Simple shorthand for a section definition */
80539 #ifndef __section
80540 # define __section(S) __attribute__ ((__section__(#S)))
80541@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80542 */
80543 #define __ACCESS_ONCE(x) ({ \
80544 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80545- (volatile typeof(x) *)&(x); })
80546+ (volatile const typeof(x) *)&(x); })
80547 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80548+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80549
80550 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80551 #ifdef CONFIG_KPROBES
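
The const added to __ACCESS_ONCE is what makes the new ACCESS_ONCE_RW necessary: a plain ACCESS_ONCE(x) now yields a const-qualified lvalue, so accidental stores through it fail to compile, and every intentional store has to be spelled ACCESS_ONCE_RW. With the two macros as patched:

int flag;

void demo(void)
{
	int v = ACCESS_ONCE(flag);	/* reads work as before */

	ACCESS_ONCE_RW(flag) = 1;	/* stores must be explicit */
	/* ACCESS_ONCE(flag) = 1;	   now a compile error:
					   assignment of read-only location */
	(void)v;
}
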
80552diff --git a/include/linux/completion.h b/include/linux/completion.h
80553index 5d5aaae..0ea9b84 100644
80554--- a/include/linux/completion.h
80555+++ b/include/linux/completion.h
80556@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80557
80558 extern void wait_for_completion(struct completion *);
80559 extern void wait_for_completion_io(struct completion *);
80560-extern int wait_for_completion_interruptible(struct completion *x);
80561-extern int wait_for_completion_killable(struct completion *x);
80562+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80563+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80564 extern unsigned long wait_for_completion_timeout(struct completion *x,
80565- unsigned long timeout);
80566+ unsigned long timeout) __intentional_overflow(-1);
80567 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80568- unsigned long timeout);
80569+ unsigned long timeout) __intentional_overflow(-1);
80570 extern long wait_for_completion_interruptible_timeout(
80571- struct completion *x, unsigned long timeout);
80572+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80573 extern long wait_for_completion_killable_timeout(
80574- struct completion *x, unsigned long timeout);
80575+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80576 extern bool try_wait_for_completion(struct completion *x);
80577 extern bool completion_done(struct completion *x);
80578
80579diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80580index 34025df..d94bbbc 100644
80581--- a/include/linux/configfs.h
80582+++ b/include/linux/configfs.h
80583@@ -125,7 +125,7 @@ struct configfs_attribute {
80584 const char *ca_name;
80585 struct module *ca_owner;
80586 umode_t ca_mode;
80587-};
80588+} __do_const;
80589
80590 /*
80591 * Users often need to create attribute structures for their configurable
80592diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80593index 4d078ce..c970f4d 100644
80594--- a/include/linux/cpufreq.h
80595+++ b/include/linux/cpufreq.h
80596@@ -206,6 +206,7 @@ struct global_attr {
80597 ssize_t (*store)(struct kobject *a, struct attribute *b,
80598 const char *c, size_t count);
80599 };
80600+typedef struct global_attr __no_const global_attr_no_const;
80601
80602 #define define_one_global_ro(_name) \
80603 static struct global_attr _name = \
80604@@ -277,7 +278,7 @@ struct cpufreq_driver {
80605 bool boost_supported;
80606 bool boost_enabled;
80607 int (*set_boost)(int state);
80608-};
80609+} __do_const;
80610
80611 /* flags */
80612 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80613diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80614index ab70f3b..3ef7771 100644
80615--- a/include/linux/cpuidle.h
80616+++ b/include/linux/cpuidle.h
80617@@ -50,7 +50,8 @@ struct cpuidle_state {
80618 int index);
80619
80620 int (*enter_dead) (struct cpuidle_device *dev, int index);
80621-};
80622+} __do_const;
80623+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80624
80625 /* Idle State Flags */
80626 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80627@@ -206,7 +207,7 @@ struct cpuidle_governor {
80628 void (*reflect) (struct cpuidle_device *dev, int index);
80629
80630 struct module *owner;
80631-};
80632+} __do_const;
80633
80634 #ifdef CONFIG_CPU_IDLE
80635 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80636diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80637index b950e9d..63810aa 100644
80638--- a/include/linux/cpumask.h
80639+++ b/include/linux/cpumask.h
80640@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80641 }
80642
80643 /* Valid inputs for n are -1 and 0. */
80644-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80645+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80646 {
80647 return n+1;
80648 }
80649
80650-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80651+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80652 {
80653 return n+1;
80654 }
80655
80656-static inline unsigned int cpumask_next_and(int n,
80657+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80658 const struct cpumask *srcp,
80659 const struct cpumask *andp)
80660 {
80661@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80662 *
80663 * Returns >= nr_cpu_ids if no further cpus set.
80664 */
80665-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80666+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80667 {
80668 /* -1 is a legal arg here. */
80669 if (n != -1)
80670@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80671 *
80672 * Returns >= nr_cpu_ids if no further cpus unset.
80673 */
80674-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80675+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80676 {
80677 /* -1 is a legal arg here. */
80678 if (n != -1)
80679@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80680 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80681 }
80682
80683-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80684+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80685 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80686 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80687
80688@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80689 * cpumask_weight - Count of bits in *srcp
80690 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80691 */
80692-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80693+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80694 {
80695 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80696 }
80697diff --git a/include/linux/cred.h b/include/linux/cred.h
80698index 2fb2ca2..d6a3340 100644
80699--- a/include/linux/cred.h
80700+++ b/include/linux/cred.h
80701@@ -35,7 +35,7 @@ struct group_info {
80702 int nblocks;
80703 kgid_t small_block[NGROUPS_SMALL];
80704 kgid_t *blocks[0];
80705-};
80706+} __randomize_layout;
80707
80708 /**
80709 * get_group_info - Get a reference to a group info structure
80710@@ -137,7 +137,7 @@ struct cred {
80711 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80712 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80713 struct rcu_head rcu; /* RCU deletion hook */
80714-};
80715+} __randomize_layout;
80716
80717 extern void __put_cred(struct cred *);
80718 extern void exit_creds(struct task_struct *);
80719@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80720 static inline void validate_process_creds(void)
80721 {
80722 }
80723+static inline void validate_task_creds(struct task_struct *task)
80724+{
80725+}
80726 #endif
80727
80728 /**
80729@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80730
80731 #define task_uid(task) (task_cred_xxx((task), uid))
80732 #define task_euid(task) (task_cred_xxx((task), euid))
80733+#define task_securebits(task) (task_cred_xxx((task), securebits))
80734
80735 #define current_cred_xxx(xxx) \
80736 ({ \
80737diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80738index 9c8776d..8c526c2 100644
80739--- a/include/linux/crypto.h
80740+++ b/include/linux/crypto.h
80741@@ -626,7 +626,7 @@ struct cipher_tfm {
80742 const u8 *key, unsigned int keylen);
80743 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80744 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80745-};
80746+} __no_const;
80747
80748 struct hash_tfm {
80749 int (*init)(struct hash_desc *desc);
80750@@ -647,13 +647,13 @@ struct compress_tfm {
80751 int (*cot_decompress)(struct crypto_tfm *tfm,
80752 const u8 *src, unsigned int slen,
80753 u8 *dst, unsigned int *dlen);
80754-};
80755+} __no_const;
80756
80757 struct rng_tfm {
80758 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80759 unsigned int dlen);
80760 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80761-};
80762+} __no_const;
80763
80764 #define crt_ablkcipher crt_u.ablkcipher
80765 #define crt_aead crt_u.aead
80766diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80767index 653589e..4ef254a 100644
80768--- a/include/linux/ctype.h
80769+++ b/include/linux/ctype.h
80770@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80771 * Fast implementation of tolower() for internal usage. Do not use in your
80772 * code.
80773 */
80774-static inline char _tolower(const char c)
80775+static inline unsigned char _tolower(const unsigned char c)
80776 {
80777 return c | 0x20;
80778 }
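
The switch to unsigned char matters on targets where plain char is signed: bytes at or above 0x80 become negative after promotion, so c | 0x20 produces a negative result and callers that compare or index with it misbehave. A small demonstration of the difference:

#include <stdio.h>

static char _tolower_old(const char c)			  { return c | 0x20; }
static unsigned char _tolower_new(const unsigned char c) { return c | 0x20; }

int main(void)
{
	char c = (char)0xC9;	/* a Latin-1 'É' byte */

	/* On signed-char targets the old version prints -23,
	 * the new one prints the intended 233 (0xE9). */
	printf("old=%d new=%u\n", _tolower_old(c), _tolower_new(c));
	return 0;
}
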
80779diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80780index 5a81398..6bbee30 100644
80781--- a/include/linux/dcache.h
80782+++ b/include/linux/dcache.h
80783@@ -123,6 +123,9 @@ struct dentry {
80784 unsigned long d_time; /* used by d_revalidate */
80785 void *d_fsdata; /* fs-specific data */
80786
80787+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80788+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80789+#endif
80790 struct list_head d_lru; /* LRU list */
80791 struct list_head d_child; /* child of parent list */
80792 struct list_head d_subdirs; /* our children */
80793@@ -133,7 +136,7 @@ struct dentry {
80794 struct hlist_node d_alias; /* inode alias list */
80795 struct rcu_head d_rcu;
80796 } d_u;
80797-};
80798+} __randomize_layout;
80799
80800 /*
80801 * dentry->d_lock spinlock nesting subclasses:
80802diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80803index 7925bf0..d5143d2 100644
80804--- a/include/linux/decompress/mm.h
80805+++ b/include/linux/decompress/mm.h
80806@@ -77,7 +77,7 @@ static void free(void *where)
80807 * warnings when not needed (indeed large_malloc / large_free are not
80808 * needed by inflate */
80809
80810-#define malloc(a) kmalloc(a, GFP_KERNEL)
80811+#define malloc(a) kmalloc((a), GFP_KERNEL)
80812 #define free(a) kfree(a)
80813
80814 #define large_malloc(a) vmalloc(a)
80815diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80816index ce447f0..83c66bd 100644
80817--- a/include/linux/devfreq.h
80818+++ b/include/linux/devfreq.h
80819@@ -114,7 +114,7 @@ struct devfreq_governor {
80820 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80821 int (*event_handler)(struct devfreq *devfreq,
80822 unsigned int event, void *data);
80823-};
80824+} __do_const;
80825
80826 /**
80827 * struct devfreq - Device devfreq structure
80828diff --git a/include/linux/device.h b/include/linux/device.h
80829index fb50673..ec0b35b 100644
80830--- a/include/linux/device.h
80831+++ b/include/linux/device.h
80832@@ -311,7 +311,7 @@ struct subsys_interface {
80833 struct list_head node;
80834 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80835 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80836-};
80837+} __do_const;
80838
80839 int subsys_interface_register(struct subsys_interface *sif);
80840 void subsys_interface_unregister(struct subsys_interface *sif);
80841@@ -507,7 +507,7 @@ struct device_type {
80842 void (*release)(struct device *dev);
80843
80844 const struct dev_pm_ops *pm;
80845-};
80846+} __do_const;
80847
80848 /* interface for exporting device attributes */
80849 struct device_attribute {
80850@@ -517,11 +517,12 @@ struct device_attribute {
80851 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80852 const char *buf, size_t count);
80853 };
80854+typedef struct device_attribute __no_const device_attribute_no_const;
80855
80856 struct dev_ext_attribute {
80857 struct device_attribute attr;
80858 void *var;
80859-};
80860+} __do_const;
80861
80862 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80863 char *buf);
80864diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80865index c3007cb..43efc8c 100644
80866--- a/include/linux/dma-mapping.h
80867+++ b/include/linux/dma-mapping.h
80868@@ -60,7 +60,7 @@ struct dma_map_ops {
80869 u64 (*get_required_mask)(struct device *dev);
80870 #endif
80871 int is_phys;
80872-};
80873+} __do_const;
80874
80875 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80876
80877diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80878index 40cd75e..38572a9 100644
80879--- a/include/linux/dmaengine.h
80880+++ b/include/linux/dmaengine.h
80881@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80882 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80883 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80884
80885-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80886+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80887 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80888-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80889+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80890 struct dma_pinned_list *pinned_list, struct page *page,
80891 unsigned int offset, size_t len);
80892
80893diff --git a/include/linux/efi.h b/include/linux/efi.h
80894index 0238d61..34a758f 100644
80895--- a/include/linux/efi.h
80896+++ b/include/linux/efi.h
80897@@ -1054,6 +1054,7 @@ struct efivar_operations {
80898 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80899 efi_query_variable_store_t *query_variable_store;
80900 };
80901+typedef struct efivar_operations __no_const efivar_operations_no_const;
80902
80903 struct efivars {
80904 /*
80905diff --git a/include/linux/elf.h b/include/linux/elf.h
80906index 20fa8d8..3d0dd18 100644
80907--- a/include/linux/elf.h
80908+++ b/include/linux/elf.h
80909@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80910 #define elf_note elf32_note
80911 #define elf_addr_t Elf32_Off
80912 #define Elf_Half Elf32_Half
80913+#define elf_dyn Elf32_Dyn
80914
80915 #else
80916
80917@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
80918 #define elf_note elf64_note
80919 #define elf_addr_t Elf64_Off
80920 #define Elf_Half Elf64_Half
80921+#define elf_dyn Elf64_Dyn
80922
80923 #endif
80924
80925diff --git a/include/linux/err.h b/include/linux/err.h
80926index a729120..6ede2c9 100644
80927--- a/include/linux/err.h
80928+++ b/include/linux/err.h
80929@@ -20,12 +20,12 @@
80930
80931 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80932
80933-static inline void * __must_check ERR_PTR(long error)
80934+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80935 {
80936 return (void *) error;
80937 }
80938
80939-static inline long __must_check PTR_ERR(__force const void *ptr)
80940+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80941 {
80942 return (long) ptr;
80943 }
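The __intentional_overflow(-1) markers above tell the PaX size_overflow plugin that the signed/pointer conversions in the errno-in-pointer idiom are deliberate, not truncation bugs. A userspace sketch of that idiom (constants and names mirror the kernel's, but this is an illustration, not the kernel code):

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }

static void *lookup(int fail)
{
        /* errors travel in the top 4095 values of the address space */
        return fail ? ERR_PTR(-2L) /* -ENOENT */ : (void *)"found";
}

int main(void)
{
        void *p = lookup(1);
        if (IS_ERR_VALUE((unsigned long)p))
                printf("error %ld\n", PTR_ERR(p));
        return 0;
}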
80944diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80945index 36f49c4..a2a1f4c 100644
80946--- a/include/linux/extcon.h
80947+++ b/include/linux/extcon.h
80948@@ -135,7 +135,7 @@ struct extcon_dev {
80949 /* /sys/class/extcon/.../mutually_exclusive/... */
80950 struct attribute_group attr_g_muex;
80951 struct attribute **attrs_muex;
80952- struct device_attribute *d_attrs_muex;
80953+ device_attribute_no_const *d_attrs_muex;
80954 };
80955
80956 /**
80957diff --git a/include/linux/fb.h b/include/linux/fb.h
80958index 09bb7a1..d98870a 100644
80959--- a/include/linux/fb.h
80960+++ b/include/linux/fb.h
80961@@ -305,7 +305,7 @@ struct fb_ops {
80962 /* called at KDB enter and leave time to prepare the console */
80963 int (*fb_debug_enter)(struct fb_info *info);
80964 int (*fb_debug_leave)(struct fb_info *info);
80965-};
80966+} __do_const;
80967
80968 #ifdef CONFIG_FB_TILEBLITTING
80969 #define FB_TILE_CURSOR_NONE 0
80970diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
80971index 230f87b..1fd0485 100644
80972--- a/include/linux/fdtable.h
80973+++ b/include/linux/fdtable.h
80974@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
80975 void put_files_struct(struct files_struct *fs);
80976 void reset_files_struct(struct files_struct *);
80977 int unshare_files(struct files_struct **);
80978-struct files_struct *dup_fd(struct files_struct *, int *);
80979+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
80980 void do_close_on_exec(struct files_struct *);
80981 int iterate_fd(struct files_struct *, unsigned,
80982 int (*)(const void *, struct file *, unsigned),
80983diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
80984index 8293262..2b3b8bd 100644
80985--- a/include/linux/frontswap.h
80986+++ b/include/linux/frontswap.h
80987@@ -11,7 +11,7 @@ struct frontswap_ops {
80988 int (*load)(unsigned, pgoff_t, struct page *);
80989 void (*invalidate_page)(unsigned, pgoff_t);
80990 void (*invalidate_area)(unsigned);
80991-};
80992+} __no_const;
80993
80994 extern bool frontswap_enabled;
80995 extern struct frontswap_ops *
80996diff --git a/include/linux/fs.h b/include/linux/fs.h
80997index 42efe13..72d42ee 100644
80998--- a/include/linux/fs.h
80999+++ b/include/linux/fs.h
81000@@ -413,7 +413,7 @@ struct address_space {
81001 spinlock_t private_lock; /* for use by the address_space */
81002 struct list_head private_list; /* ditto */
81003 void *private_data; /* ditto */
81004-} __attribute__((aligned(sizeof(long))));
81005+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81006 /*
81007 * On most architectures that alignment is already the case; but
81008 * must be enforced here for CRIS, to let the least significant bit
81009@@ -456,7 +456,7 @@ struct block_device {
81010 int bd_fsfreeze_count;
81011 /* Mutex for freeze */
81012 struct mutex bd_fsfreeze_mutex;
81013-};
81014+} __randomize_layout;
81015
81016 /*
81017 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81018@@ -642,7 +642,7 @@ struct inode {
81019 #endif
81020
81021 void *i_private; /* fs or device private pointer */
81022-};
81023+} __randomize_layout;
81024
81025 static inline int inode_unhashed(struct inode *inode)
81026 {
81027@@ -837,7 +837,7 @@ struct file {
81028 struct list_head f_tfile_llink;
81029 #endif /* #ifdef CONFIG_EPOLL */
81030 struct address_space *f_mapping;
81031-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81032+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81033
81034 struct file_handle {
81035 __u32 handle_bytes;
81036@@ -962,7 +962,7 @@ struct file_lock {
81037 int state; /* state of grant or error if -ve */
81038 } afs;
81039 } fl_u;
81040-};
81041+} __randomize_layout;
81042
81043 /* The following constant reflects the upper bound of the file/locking space */
81044 #ifndef OFFSET_MAX
81045@@ -1305,7 +1305,7 @@ struct super_block {
81046 * Indicates how deep in a filesystem stack this SB is
81047 */
81048 int s_stack_depth;
81049-};
81050+} __randomize_layout;
81051
81052 extern struct timespec current_fs_time(struct super_block *sb);
81053
81054@@ -1536,7 +1536,8 @@ struct file_operations {
81055 long (*fallocate)(struct file *file, int mode, loff_t offset,
81056 loff_t len);
81057 void (*show_fdinfo)(struct seq_file *m, struct file *f);
81058-};
81059+} __do_const __randomize_layout;
81060+typedef struct file_operations __no_const file_operations_no_const;
81061
81062 struct inode_operations {
81063 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81064@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
81065 return !IS_DEADDIR(inode);
81066 }
81067
81068+static inline bool is_sidechannel_device(const struct inode *inode)
81069+{
81070+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81071+ umode_t mode = inode->i_mode;
81072+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81073+#else
81074+ return false;
81075+#endif
81076+}
81077+
81078 #endif /* _LINUX_FS_H */
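The is_sidechannel_device() helper added above (consumed by the fsnotify hooks further down) matches world-accessible character or block devices, whose access patterns could leak information to other users, for example by watching a tty device with inotify. A userspace restatement of the test (illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

static bool is_sidechannel_device(mode_t mode)
{
        /* char/block device that is world-readable or world-writable */
        return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
        struct stat st;
        if (stat("/dev/ptmx", &st) == 0)
                printf("/dev/ptmx: %s\n", is_sidechannel_device(st.st_mode)
                       ? "side-channel candidate" : "not matched");
        return 0;
}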
81079diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81080index 0efc3e6..fd23610 100644
81081--- a/include/linux/fs_struct.h
81082+++ b/include/linux/fs_struct.h
81083@@ -6,13 +6,13 @@
81084 #include <linux/seqlock.h>
81085
81086 struct fs_struct {
81087- int users;
81088+ atomic_t users;
81089 spinlock_t lock;
81090 seqcount_t seq;
81091 int umask;
81092 int in_exec;
81093 struct path root, pwd;
81094-};
81095+} __randomize_layout;
81096
81097 extern struct kmem_cache *fs_cachep;
81098
81099diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81100index 7714849..a4a5c7a 100644
81101--- a/include/linux/fscache-cache.h
81102+++ b/include/linux/fscache-cache.h
81103@@ -113,7 +113,7 @@ struct fscache_operation {
81104 fscache_operation_release_t release;
81105 };
81106
81107-extern atomic_t fscache_op_debug_id;
81108+extern atomic_unchecked_t fscache_op_debug_id;
81109 extern void fscache_op_work_func(struct work_struct *work);
81110
81111 extern void fscache_enqueue_operation(struct fscache_operation *);
81112@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81113 INIT_WORK(&op->work, fscache_op_work_func);
81114 atomic_set(&op->usage, 1);
81115 op->state = FSCACHE_OP_ST_INITIALISED;
81116- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81117+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81118 op->processor = processor;
81119 op->release = release;
81120 INIT_LIST_HEAD(&op->pend_link);
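atomic_unchecked_t above is the escape hatch for PaX's REFCOUNT protection: plain atomic_t gets overflow detection, while counters whose wraparound is harmless, such as this debug id, use the unchecked variant. A C11 sketch of the split (the type and function names mirror the patch, the bodies are illustrative):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;

/* Wraparound here is acceptable by design: no saturation, no trap. */
static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
        return atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed) + 1;
}

static atomic_unchecked_t fscache_op_debug_id;

int main(void)
{
        printf("op id %d\n", atomic_inc_return_unchecked(&fscache_op_debug_id));
        printf("op id %d\n", atomic_inc_return_unchecked(&fscache_op_debug_id));
        return 0;
}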
81121diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81122index 115bb81..e7b812b 100644
81123--- a/include/linux/fscache.h
81124+++ b/include/linux/fscache.h
81125@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81126 * - this is mandatory for any object that may have data
81127 */
81128 void (*now_uncached)(void *cookie_netfs_data);
81129-};
81130+} __do_const;
81131
81132 /*
81133 * fscache cached network filesystem type
81134diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81135index 7ee1774..72505b8 100644
81136--- a/include/linux/fsnotify.h
81137+++ b/include/linux/fsnotify.h
81138@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81139 struct inode *inode = file_inode(file);
81140 __u32 mask = FS_ACCESS;
81141
81142+ if (is_sidechannel_device(inode))
81143+ return;
81144+
81145 if (S_ISDIR(inode->i_mode))
81146 mask |= FS_ISDIR;
81147
81148@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81149 struct inode *inode = file_inode(file);
81150 __u32 mask = FS_MODIFY;
81151
81152+ if (is_sidechannel_device(inode))
81153+ return;
81154+
81155 if (S_ISDIR(inode->i_mode))
81156 mask |= FS_ISDIR;
81157
81158@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81159 */
81160 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81161 {
81162- return kstrdup(name, GFP_KERNEL);
81163+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81164 }
81165
81166 /*
81167diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81168index ec274e0..e678159 100644
81169--- a/include/linux/genhd.h
81170+++ b/include/linux/genhd.h
81171@@ -194,7 +194,7 @@ struct gendisk {
81172 struct kobject *slave_dir;
81173
81174 struct timer_rand_state *random;
81175- atomic_t sync_io; /* RAID */
81176+ atomic_unchecked_t sync_io; /* RAID */
81177 struct disk_events *ev;
81178 #ifdef CONFIG_BLK_DEV_INTEGRITY
81179 struct blk_integrity *integrity;
81180@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81181 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81182
81183 /* drivers/char/random.c */
81184-extern void add_disk_randomness(struct gendisk *disk);
81185+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81186 extern void rand_initialize_disk(struct gendisk *disk);
81187
81188 static inline sector_t get_start_sect(struct block_device *bdev)
81189diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81190index 667c311..abac2a7 100644
81191--- a/include/linux/genl_magic_func.h
81192+++ b/include/linux/genl_magic_func.h
81193@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81194 },
81195
81196 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81197-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81198+static struct genl_ops ZZZ_genl_ops[] = {
81199 #include GENL_MAGIC_INCLUDE_FILE
81200 };
81201
81202diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81203index b840e3b..aeaeef9 100644
81204--- a/include/linux/gfp.h
81205+++ b/include/linux/gfp.h
81206@@ -34,6 +34,13 @@ struct vm_area_struct;
81207 #define ___GFP_NO_KSWAPD 0x400000u
81208 #define ___GFP_OTHER_NODE 0x800000u
81209 #define ___GFP_WRITE 0x1000000u
81210+
81211+#ifdef CONFIG_PAX_USERCOPY_SLABS
81212+#define ___GFP_USERCOPY 0x2000000u
81213+#else
81214+#define ___GFP_USERCOPY 0
81215+#endif
81216+
81217 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81218
81219 /*
81220@@ -90,6 +97,7 @@ struct vm_area_struct;
81221 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81222 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81223 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81224+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81225
81226 /*
81227 * This may seem redundant, but it's a way of annotating false positives vs.
81228@@ -97,7 +105,7 @@ struct vm_area_struct;
81229 */
81230 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81231
81232-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81233+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81234 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81235
81236 /* This equals 0, but use constants in case they ever change */
81237@@ -152,6 +160,8 @@ struct vm_area_struct;
81238 /* 4GB DMA on some platforms */
81239 #define GFP_DMA32 __GFP_DMA32
81240
81241+#define GFP_USERCOPY __GFP_USERCOPY
81242+
81243 /* Convert GFP flags to their corresponding migrate type */
81244 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81245 {
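The gfp.h hunks above claim bit 25 for ___GFP_USERCOPY and bump __GFP_BITS_SHIFT from 25 to 26 so the new flag stays inside __GFP_BITS_MASK. The bit arithmetic, checked in a standalone sketch:

#include <assert.h>

#define ___GFP_WRITE     0x1000000u   /* bit 24, previously the last flag */
#define ___GFP_USERCOPY  0x2000000u   /* bit 25, added by the patch */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK  ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
        assert(___GFP_USERCOPY == ___GFP_WRITE << 1);    /* next free bit */
        assert(___GFP_USERCOPY & __GFP_BITS_MASK);       /* covered by new mask */
        assert(!(___GFP_USERCOPY & ((1u << 25) - 1)));   /* old 25-bit mask missed it */
        return 0;
}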
81246diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81247new file mode 100644
81248index 0000000..91858e4
81249--- /dev/null
81250+++ b/include/linux/gracl.h
81251@@ -0,0 +1,342 @@
81252+#ifndef GR_ACL_H
81253+#define GR_ACL_H
81254+
81255+#include <linux/grdefs.h>
81256+#include <linux/resource.h>
81257+#include <linux/capability.h>
81258+#include <linux/dcache.h>
81259+#include <asm/resource.h>
81260+
81261+/* Major status information */
81262+
81263+#define GR_VERSION "grsecurity 3.1"
81264+#define GRSECURITY_VERSION 0x3100
81265+
81266+enum {
81267+ GR_SHUTDOWN = 0,
81268+ GR_ENABLE = 1,
81269+ GR_SPROLE = 2,
81270+ GR_OLDRELOAD = 3,
81271+ GR_SEGVMOD = 4,
81272+ GR_STATUS = 5,
81273+ GR_UNSPROLE = 6,
81274+ GR_PASSSET = 7,
81275+ GR_SPROLEPAM = 8,
81276+ GR_RELOAD = 9,
81277+};
81278+
81279+/* Password setup definitions
81280+ * kernel/grhash.c */
81281+enum {
81282+ GR_PW_LEN = 128,
81283+ GR_SALT_LEN = 16,
81284+ GR_SHA_LEN = 32,
81285+};
81286+
81287+enum {
81288+ GR_SPROLE_LEN = 64,
81289+};
81290+
81291+enum {
81292+ GR_NO_GLOB = 0,
81293+ GR_REG_GLOB,
81294+ GR_CREATE_GLOB
81295+};
81296+
81297+#define GR_NLIMITS 32
81298+
81299+/* Begin Data Structures */
81300+
81301+struct sprole_pw {
81302+ unsigned char *rolename;
81303+ unsigned char salt[GR_SALT_LEN];
81304+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81305+};
81306+
81307+struct name_entry {
81308+ __u32 key;
81309+ u64 inode;
81310+ dev_t device;
81311+ char *name;
81312+ __u16 len;
81313+ __u8 deleted;
81314+ struct name_entry *prev;
81315+ struct name_entry *next;
81316+};
81317+
81318+struct inodev_entry {
81319+ struct name_entry *nentry;
81320+ struct inodev_entry *prev;
81321+ struct inodev_entry *next;
81322+};
81323+
81324+struct acl_role_db {
81325+ struct acl_role_label **r_hash;
81326+ __u32 r_size;
81327+};
81328+
81329+struct inodev_db {
81330+ struct inodev_entry **i_hash;
81331+ __u32 i_size;
81332+};
81333+
81334+struct name_db {
81335+ struct name_entry **n_hash;
81336+ __u32 n_size;
81337+};
81338+
81339+struct crash_uid {
81340+ uid_t uid;
81341+ unsigned long expires;
81342+};
81343+
81344+struct gr_hash_struct {
81345+ void **table;
81346+ void **nametable;
81347+ void *first;
81348+ __u32 table_size;
81349+ __u32 used_size;
81350+ int type;
81351+};
81352+
81353+/* Userspace Grsecurity ACL data structures */
81354+
81355+struct acl_subject_label {
81356+ char *filename;
81357+ u64 inode;
81358+ dev_t device;
81359+ __u32 mode;
81360+ kernel_cap_t cap_mask;
81361+ kernel_cap_t cap_lower;
81362+ kernel_cap_t cap_invert_audit;
81363+
81364+ struct rlimit res[GR_NLIMITS];
81365+ __u32 resmask;
81366+
81367+ __u8 user_trans_type;
81368+ __u8 group_trans_type;
81369+ uid_t *user_transitions;
81370+ gid_t *group_transitions;
81371+ __u16 user_trans_num;
81372+ __u16 group_trans_num;
81373+
81374+ __u32 sock_families[2];
81375+ __u32 ip_proto[8];
81376+ __u32 ip_type;
81377+ struct acl_ip_label **ips;
81378+ __u32 ip_num;
81379+ __u32 inaddr_any_override;
81380+
81381+ __u32 crashes;
81382+ unsigned long expires;
81383+
81384+ struct acl_subject_label *parent_subject;
81385+ struct gr_hash_struct *hash;
81386+ struct acl_subject_label *prev;
81387+ struct acl_subject_label *next;
81388+
81389+ struct acl_object_label **obj_hash;
81390+ __u32 obj_hash_size;
81391+ __u16 pax_flags;
81392+};
81393+
81394+struct role_allowed_ip {
81395+ __u32 addr;
81396+ __u32 netmask;
81397+
81398+ struct role_allowed_ip *prev;
81399+ struct role_allowed_ip *next;
81400+};
81401+
81402+struct role_transition {
81403+ char *rolename;
81404+
81405+ struct role_transition *prev;
81406+ struct role_transition *next;
81407+};
81408+
81409+struct acl_role_label {
81410+ char *rolename;
81411+ uid_t uidgid;
81412+ __u16 roletype;
81413+
81414+ __u16 auth_attempts;
81415+ unsigned long expires;
81416+
81417+ struct acl_subject_label *root_label;
81418+ struct gr_hash_struct *hash;
81419+
81420+ struct acl_role_label *prev;
81421+ struct acl_role_label *next;
81422+
81423+ struct role_transition *transitions;
81424+ struct role_allowed_ip *allowed_ips;
81425+ uid_t *domain_children;
81426+ __u16 domain_child_num;
81427+
81428+ umode_t umask;
81429+
81430+ struct acl_subject_label **subj_hash;
81431+ __u32 subj_hash_size;
81432+};
81433+
81434+struct user_acl_role_db {
81435+ struct acl_role_label **r_table;
81436+ __u32 num_pointers; /* Number of allocations to track */
81437+ __u32 num_roles; /* Number of roles */
81438+ __u32 num_domain_children; /* Number of domain children */
81439+ __u32 num_subjects; /* Number of subjects */
81440+ __u32 num_objects; /* Number of objects */
81441+};
81442+
81443+struct acl_object_label {
81444+ char *filename;
81445+ u64 inode;
81446+ dev_t device;
81447+ __u32 mode;
81448+
81449+ struct acl_subject_label *nested;
81450+ struct acl_object_label *globbed;
81451+
81452+ /* next two structures not used */
81453+
81454+ struct acl_object_label *prev;
81455+ struct acl_object_label *next;
81456+};
81457+
81458+struct acl_ip_label {
81459+ char *iface;
81460+ __u32 addr;
81461+ __u32 netmask;
81462+ __u16 low, high;
81463+ __u8 mode;
81464+ __u32 type;
81465+ __u32 proto[8];
81466+
81467+ /* next two structures not used */
81468+
81469+ struct acl_ip_label *prev;
81470+ struct acl_ip_label *next;
81471+};
81472+
81473+struct gr_arg {
81474+ struct user_acl_role_db role_db;
81475+ unsigned char pw[GR_PW_LEN];
81476+ unsigned char salt[GR_SALT_LEN];
81477+ unsigned char sum[GR_SHA_LEN];
81478+ unsigned char sp_role[GR_SPROLE_LEN];
81479+ struct sprole_pw *sprole_pws;
81480+ dev_t segv_device;
81481+ u64 segv_inode;
81482+ uid_t segv_uid;
81483+ __u16 num_sprole_pws;
81484+ __u16 mode;
81485+};
81486+
81487+struct gr_arg_wrapper {
81488+ struct gr_arg *arg;
81489+ __u32 version;
81490+ __u32 size;
81491+};
81492+
81493+struct subject_map {
81494+ struct acl_subject_label *user;
81495+ struct acl_subject_label *kernel;
81496+ struct subject_map *prev;
81497+ struct subject_map *next;
81498+};
81499+
81500+struct acl_subj_map_db {
81501+ struct subject_map **s_hash;
81502+ __u32 s_size;
81503+};
81504+
81505+struct gr_policy_state {
81506+ struct sprole_pw **acl_special_roles;
81507+ __u16 num_sprole_pws;
81508+ struct acl_role_label *kernel_role;
81509+ struct acl_role_label *role_list;
81510+ struct acl_role_label *default_role;
81511+ struct acl_role_db acl_role_set;
81512+ struct acl_subj_map_db subj_map_set;
81513+ struct name_db name_set;
81514+ struct inodev_db inodev_set;
81515+};
81516+
81517+struct gr_alloc_state {
81518+ unsigned long alloc_stack_next;
81519+ unsigned long alloc_stack_size;
81520+ void **alloc_stack;
81521+};
81522+
81523+struct gr_reload_state {
81524+ struct gr_policy_state oldpolicy;
81525+ struct gr_alloc_state oldalloc;
81526+ struct gr_policy_state newpolicy;
81527+ struct gr_alloc_state newalloc;
81528+ struct gr_policy_state *oldpolicy_ptr;
81529+ struct gr_alloc_state *oldalloc_ptr;
81530+ unsigned char oldmode;
81531+};
81532+
81533+/* End Data Structures Section */
81534+
81535+/* Hash functions generated by empirical testing by Brad Spengler.
81536+   These make good use of the low bits of the inode: generally 0-1
81537+   loop iterations for a successful match, 0-3 for an unsuccessful
81538+   match.  Shift/add algorithm with modulus of table size and an XOR. */
81539+
81540+static __inline__ unsigned int
81541+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81542+{
81543+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81544+}
81545+
81546+static __inline__ unsigned int
81547+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81548+{
81549+ return ((const unsigned long)userp % sz);
81550+}
81551+
81552+static __inline__ unsigned int
81553+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81554+{
81555+ unsigned int rem;
81556+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81557+ return rem;
81558+}
81559+
81560+static __inline__ unsigned int
81561+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81562+{
81563+ return full_name_hash((const unsigned char *)name, len) % sz;
81564+}
81565+
81566+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81567+ subj = NULL; \
81568+ iter = 0; \
81569+ while (iter < role->subj_hash_size) { \
81570+ if (subj == NULL) \
81571+ subj = role->subj_hash[iter]; \
81572+ if (subj == NULL) { \
81573+ iter++; \
81574+ continue; \
81575+ }
81576+
81577+#define FOR_EACH_SUBJECT_END(subj,iter) \
81578+ subj = subj->next; \
81579+ if (subj == NULL) \
81580+ iter++; \
81581+ }
81582+
81583+
81584+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81585+ subj = role->hash->first; \
81586+ while (subj != NULL) {
81587+
81588+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81589+ subj = subj->next; \
81590+ }
81591+
81592+#endif
81593+
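A userspace restatement of gr_fhash() above, with div_u64_rem() replaced by a plain 64-bit modulus (illustrative; the kernel helper exists because 64-bit division needs library support on some 32-bit targets):

#include <stdint.h>
#include <stdio.h>

static unsigned int gr_fhash(uint64_t ino, uint32_t dev, unsigned int sz)
{
        /* same mixing expression as the patch; % replaces div_u64_rem() */
        uint64_t mixed = (ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9));
        return (unsigned int)(mixed % sz);
}

int main(void)
{
        /* same inode on two devices lands in different buckets */
        printf("%u %u\n", gr_fhash(4242, 0x801, 1024), gr_fhash(4242, 0x802, 1024));
        return 0;
}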
81594diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81595new file mode 100644
81596index 0000000..af64092
81597--- /dev/null
81598+++ b/include/linux/gracl_compat.h
81599@@ -0,0 +1,156 @@
81600+#ifndef GR_ACL_COMPAT_H
81601+#define GR_ACL_COMPAT_H
81602+
81603+#include <linux/resource.h>
81604+#include <asm/resource.h>
81605+
81606+struct sprole_pw_compat {
81607+ compat_uptr_t rolename;
81608+ unsigned char salt[GR_SALT_LEN];
81609+ unsigned char sum[GR_SHA_LEN];
81610+};
81611+
81612+struct gr_hash_struct_compat {
81613+ compat_uptr_t table;
81614+ compat_uptr_t nametable;
81615+ compat_uptr_t first;
81616+ __u32 table_size;
81617+ __u32 used_size;
81618+ int type;
81619+};
81620+
81621+struct acl_subject_label_compat {
81622+ compat_uptr_t filename;
81623+ compat_u64 inode;
81624+ __u32 device;
81625+ __u32 mode;
81626+ kernel_cap_t cap_mask;
81627+ kernel_cap_t cap_lower;
81628+ kernel_cap_t cap_invert_audit;
81629+
81630+ struct compat_rlimit res[GR_NLIMITS];
81631+ __u32 resmask;
81632+
81633+ __u8 user_trans_type;
81634+ __u8 group_trans_type;
81635+ compat_uptr_t user_transitions;
81636+ compat_uptr_t group_transitions;
81637+ __u16 user_trans_num;
81638+ __u16 group_trans_num;
81639+
81640+ __u32 sock_families[2];
81641+ __u32 ip_proto[8];
81642+ __u32 ip_type;
81643+ compat_uptr_t ips;
81644+ __u32 ip_num;
81645+ __u32 inaddr_any_override;
81646+
81647+ __u32 crashes;
81648+ compat_ulong_t expires;
81649+
81650+ compat_uptr_t parent_subject;
81651+ compat_uptr_t hash;
81652+ compat_uptr_t prev;
81653+ compat_uptr_t next;
81654+
81655+ compat_uptr_t obj_hash;
81656+ __u32 obj_hash_size;
81657+ __u16 pax_flags;
81658+};
81659+
81660+struct role_allowed_ip_compat {
81661+ __u32 addr;
81662+ __u32 netmask;
81663+
81664+ compat_uptr_t prev;
81665+ compat_uptr_t next;
81666+};
81667+
81668+struct role_transition_compat {
81669+ compat_uptr_t rolename;
81670+
81671+ compat_uptr_t prev;
81672+ compat_uptr_t next;
81673+};
81674+
81675+struct acl_role_label_compat {
81676+ compat_uptr_t rolename;
81677+ uid_t uidgid;
81678+ __u16 roletype;
81679+
81680+ __u16 auth_attempts;
81681+ compat_ulong_t expires;
81682+
81683+ compat_uptr_t root_label;
81684+ compat_uptr_t hash;
81685+
81686+ compat_uptr_t prev;
81687+ compat_uptr_t next;
81688+
81689+ compat_uptr_t transitions;
81690+ compat_uptr_t allowed_ips;
81691+ compat_uptr_t domain_children;
81692+ __u16 domain_child_num;
81693+
81694+ umode_t umask;
81695+
81696+ compat_uptr_t subj_hash;
81697+ __u32 subj_hash_size;
81698+};
81699+
81700+struct user_acl_role_db_compat {
81701+ compat_uptr_t r_table;
81702+ __u32 num_pointers;
81703+ __u32 num_roles;
81704+ __u32 num_domain_children;
81705+ __u32 num_subjects;
81706+ __u32 num_objects;
81707+};
81708+
81709+struct acl_object_label_compat {
81710+ compat_uptr_t filename;
81711+ compat_u64 inode;
81712+ __u32 device;
81713+ __u32 mode;
81714+
81715+ compat_uptr_t nested;
81716+ compat_uptr_t globbed;
81717+
81718+ compat_uptr_t prev;
81719+ compat_uptr_t next;
81720+};
81721+
81722+struct acl_ip_label_compat {
81723+ compat_uptr_t iface;
81724+ __u32 addr;
81725+ __u32 netmask;
81726+ __u16 low, high;
81727+ __u8 mode;
81728+ __u32 type;
81729+ __u32 proto[8];
81730+
81731+ compat_uptr_t prev;
81732+ compat_uptr_t next;
81733+};
81734+
81735+struct gr_arg_compat {
81736+ struct user_acl_role_db_compat role_db;
81737+ unsigned char pw[GR_PW_LEN];
81738+ unsigned char salt[GR_SALT_LEN];
81739+ unsigned char sum[GR_SHA_LEN];
81740+ unsigned char sp_role[GR_SPROLE_LEN];
81741+ compat_uptr_t sprole_pws;
81742+ __u32 segv_device;
81743+ compat_u64 segv_inode;
81744+ uid_t segv_uid;
81745+ __u16 num_sprole_pws;
81746+ __u16 mode;
81747+};
81748+
81749+struct gr_arg_wrapper_compat {
81750+ compat_uptr_t arg;
81751+ __u32 version;
81752+ __u32 size;
81753+};
81754+
81755+#endif
81756diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81757new file mode 100644
81758index 0000000..323ecf2
81759--- /dev/null
81760+++ b/include/linux/gralloc.h
81761@@ -0,0 +1,9 @@
81762+#ifndef __GRALLOC_H
81763+#define __GRALLOC_H
81764+
81765+void acl_free_all(void);
81766+int acl_alloc_stack_init(unsigned long size);
81767+void *acl_alloc(unsigned long len);
81768+void *acl_alloc_num(unsigned long num, unsigned long len);
81769+
81770+#endif
81771diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81772new file mode 100644
81773index 0000000..be66033
81774--- /dev/null
81775+++ b/include/linux/grdefs.h
81776@@ -0,0 +1,140 @@
81777+#ifndef GRDEFS_H
81778+#define GRDEFS_H
81779+
81780+/* Begin grsecurity status declarations */
81781+
81782+enum {
81783+ GR_READY = 0x01,
81784+ GR_STATUS_INIT = 0x00 /* disabled state */
81785+};
81786+
81787+/* Begin ACL declarations */
81788+
81789+/* Role flags */
81790+
81791+enum {
81792+ GR_ROLE_USER = 0x0001,
81793+ GR_ROLE_GROUP = 0x0002,
81794+ GR_ROLE_DEFAULT = 0x0004,
81795+ GR_ROLE_SPECIAL = 0x0008,
81796+ GR_ROLE_AUTH = 0x0010,
81797+ GR_ROLE_NOPW = 0x0020,
81798+ GR_ROLE_GOD = 0x0040,
81799+ GR_ROLE_LEARN = 0x0080,
81800+ GR_ROLE_TPE = 0x0100,
81801+ GR_ROLE_DOMAIN = 0x0200,
81802+ GR_ROLE_PAM = 0x0400,
81803+ GR_ROLE_PERSIST = 0x0800
81804+};
81805+
81806+/* ACL Subject and Object mode flags */
81807+enum {
81808+ GR_DELETED = 0x80000000
81809+};
81810+
81811+/* ACL Object-only mode flags */
81812+enum {
81813+ GR_READ = 0x00000001,
81814+ GR_APPEND = 0x00000002,
81815+ GR_WRITE = 0x00000004,
81816+ GR_EXEC = 0x00000008,
81817+ GR_FIND = 0x00000010,
81818+ GR_INHERIT = 0x00000020,
81819+ GR_SETID = 0x00000040,
81820+ GR_CREATE = 0x00000080,
81821+ GR_DELETE = 0x00000100,
81822+ GR_LINK = 0x00000200,
81823+ GR_AUDIT_READ = 0x00000400,
81824+ GR_AUDIT_APPEND = 0x00000800,
81825+ GR_AUDIT_WRITE = 0x00001000,
81826+ GR_AUDIT_EXEC = 0x00002000,
81827+ GR_AUDIT_FIND = 0x00004000,
81828+ GR_AUDIT_INHERIT= 0x00008000,
81829+ GR_AUDIT_SETID = 0x00010000,
81830+ GR_AUDIT_CREATE = 0x00020000,
81831+ GR_AUDIT_DELETE = 0x00040000,
81832+ GR_AUDIT_LINK = 0x00080000,
81833+ GR_PTRACERD = 0x00100000,
81834+ GR_NOPTRACE = 0x00200000,
81835+ GR_SUPPRESS = 0x00400000,
81836+ GR_NOLEARN = 0x00800000,
81837+ GR_INIT_TRANSFER= 0x01000000
81838+};
81839+
81840+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81841+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81842+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81843+
81844+/* ACL subject-only mode flags */
81845+enum {
81846+ GR_KILL = 0x00000001,
81847+ GR_VIEW = 0x00000002,
81848+ GR_PROTECTED = 0x00000004,
81849+ GR_LEARN = 0x00000008,
81850+ GR_OVERRIDE = 0x00000010,
81851+ /* just a placeholder, this mode is only used in userspace */
81852+ GR_DUMMY = 0x00000020,
81853+ GR_PROTSHM = 0x00000040,
81854+ GR_KILLPROC = 0x00000080,
81855+ GR_KILLIPPROC = 0x00000100,
81856+ /* just a placeholder, this mode is only used in userspace */
81857+ GR_NOTROJAN = 0x00000200,
81858+ GR_PROTPROCFD = 0x00000400,
81859+ GR_PROCACCT = 0x00000800,
81860+ GR_RELAXPTRACE = 0x00001000,
81861+ //GR_NESTED = 0x00002000,
81862+ GR_INHERITLEARN = 0x00004000,
81863+ GR_PROCFIND = 0x00008000,
81864+ GR_POVERRIDE = 0x00010000,
81865+ GR_KERNELAUTH = 0x00020000,
81866+ GR_ATSECURE = 0x00040000,
81867+ GR_SHMEXEC = 0x00080000
81868+};
81869+
81870+enum {
81871+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81872+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81873+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81874+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81875+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81876+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81877+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81878+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81879+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81880+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81881+};
81882+
81883+enum {
81884+ GR_ID_USER = 0x01,
81885+ GR_ID_GROUP = 0x02,
81886+};
81887+
81888+enum {
81889+ GR_ID_ALLOW = 0x01,
81890+ GR_ID_DENY = 0x02,
81891+};
81892+
81893+#define GR_CRASH_RES 31
81894+#define GR_UIDTABLE_MAX 500
81895+
81896+/* begin resource learning section */
81897+enum {
81898+ GR_RLIM_CPU_BUMP = 60,
81899+ GR_RLIM_FSIZE_BUMP = 50000,
81900+ GR_RLIM_DATA_BUMP = 10000,
81901+ GR_RLIM_STACK_BUMP = 1000,
81902+ GR_RLIM_CORE_BUMP = 10000,
81903+ GR_RLIM_RSS_BUMP = 500000,
81904+ GR_RLIM_NPROC_BUMP = 1,
81905+ GR_RLIM_NOFILE_BUMP = 5,
81906+ GR_RLIM_MEMLOCK_BUMP = 50000,
81907+ GR_RLIM_AS_BUMP = 500000,
81908+ GR_RLIM_LOCKS_BUMP = 2,
81909+ GR_RLIM_SIGPENDING_BUMP = 5,
81910+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81911+ GR_RLIM_NICE_BUMP = 1,
81912+ GR_RLIM_RTPRIO_BUMP = 1,
81913+ GR_RLIM_RTTIME_BUMP = 1000000
81914+};
81915+
81916+#endif
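The object and audit modes above are paired bitmasks: each GR_AUDIT_* flag is its base flag shifted left by 10 (GR_READ at bit 0, GR_AUDIT_READ at bit 10, and so on), and GR_AUDITS is simply their OR. A short sketch of how such a mode word is tested (values copied from the enum; the test itself is illustrative):

#include <stdio.h>

enum {
        GR_READ       = 0x00000001,
        GR_WRITE      = 0x00000004,
        GR_AUDIT_READ = 0x00000400,   /* == GR_READ << 10 */
};

int main(void)
{
        unsigned int mode = GR_READ | GR_AUDIT_READ;   /* readable, reads audited */

        if (mode & GR_READ)
                printf("access granted%s\n",
                       (mode & GR_AUDIT_READ) ? " (audited)" : "");
        return 0;
}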
81917diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
81918new file mode 100644
81919index 0000000..fb1de5d
81920--- /dev/null
81921+++ b/include/linux/grinternal.h
81922@@ -0,0 +1,230 @@
81923+#ifndef __GRINTERNAL_H
81924+#define __GRINTERNAL_H
81925+
81926+#ifdef CONFIG_GRKERNSEC
81927+
81928+#include <linux/fs.h>
81929+#include <linux/mnt_namespace.h>
81930+#include <linux/nsproxy.h>
81931+#include <linux/gracl.h>
81932+#include <linux/grdefs.h>
81933+#include <linux/grmsg.h>
81934+
81935+void gr_add_learn_entry(const char *fmt, ...)
81936+ __attribute__ ((format (printf, 1, 2)));
81937+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
81938+ const struct vfsmount *mnt);
81939+__u32 gr_check_create(const struct dentry *new_dentry,
81940+ const struct dentry *parent,
81941+ const struct vfsmount *mnt, const __u32 mode);
81942+int gr_check_protected_task(const struct task_struct *task);
81943+__u32 to_gr_audit(const __u32 reqmode);
81944+int gr_set_acls(const int type);
81945+int gr_acl_is_enabled(void);
81946+char gr_roletype_to_char(void);
81947+
81948+void gr_handle_alertkill(struct task_struct *task);
81949+char *gr_to_filename(const struct dentry *dentry,
81950+ const struct vfsmount *mnt);
81951+char *gr_to_filename1(const struct dentry *dentry,
81952+ const struct vfsmount *mnt);
81953+char *gr_to_filename2(const struct dentry *dentry,
81954+ const struct vfsmount *mnt);
81955+char *gr_to_filename3(const struct dentry *dentry,
81956+ const struct vfsmount *mnt);
81957+
81958+extern int grsec_enable_ptrace_readexec;
81959+extern int grsec_enable_harden_ptrace;
81960+extern int grsec_enable_link;
81961+extern int grsec_enable_fifo;
81962+extern int grsec_enable_execve;
81963+extern int grsec_enable_shm;
81964+extern int grsec_enable_execlog;
81965+extern int grsec_enable_signal;
81966+extern int grsec_enable_audit_ptrace;
81967+extern int grsec_enable_forkfail;
81968+extern int grsec_enable_time;
81969+extern int grsec_enable_rofs;
81970+extern int grsec_deny_new_usb;
81971+extern int grsec_enable_chroot_shmat;
81972+extern int grsec_enable_chroot_mount;
81973+extern int grsec_enable_chroot_double;
81974+extern int grsec_enable_chroot_pivot;
81975+extern int grsec_enable_chroot_chdir;
81976+extern int grsec_enable_chroot_chmod;
81977+extern int grsec_enable_chroot_mknod;
81978+extern int grsec_enable_chroot_fchdir;
81979+extern int grsec_enable_chroot_nice;
81980+extern int grsec_enable_chroot_execlog;
81981+extern int grsec_enable_chroot_caps;
81982+extern int grsec_enable_chroot_rename;
81983+extern int grsec_enable_chroot_sysctl;
81984+extern int grsec_enable_chroot_unix;
81985+extern int grsec_enable_symlinkown;
81986+extern kgid_t grsec_symlinkown_gid;
81987+extern int grsec_enable_tpe;
81988+extern kgid_t grsec_tpe_gid;
81989+extern int grsec_enable_tpe_all;
81990+extern int grsec_enable_tpe_invert;
81991+extern int grsec_enable_socket_all;
81992+extern kgid_t grsec_socket_all_gid;
81993+extern int grsec_enable_socket_client;
81994+extern kgid_t grsec_socket_client_gid;
81995+extern int grsec_enable_socket_server;
81996+extern kgid_t grsec_socket_server_gid;
81997+extern kgid_t grsec_audit_gid;
81998+extern int grsec_enable_group;
81999+extern int grsec_enable_log_rwxmaps;
82000+extern int grsec_enable_mount;
82001+extern int grsec_enable_chdir;
82002+extern int grsec_resource_logging;
82003+extern int grsec_enable_blackhole;
82004+extern int grsec_lastack_retries;
82005+extern int grsec_enable_brute;
82006+extern int grsec_enable_harden_ipc;
82007+extern int grsec_lock;
82008+
82009+extern spinlock_t grsec_alert_lock;
82010+extern unsigned long grsec_alert_wtime;
82011+extern unsigned long grsec_alert_fyet;
82012+
82013+extern spinlock_t grsec_audit_lock;
82014+
82015+extern rwlock_t grsec_exec_file_lock;
82016+
82017+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82018+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82019+ (tsk)->exec_file->f_path.mnt) : "/")
82020+
82021+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82022+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82023+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82024+
82025+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82026+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82027+ (tsk)->exec_file->f_path.mnt) : "/")
82028+
82029+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82030+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82031+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82032+
82033+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82034+
82035+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82036+
82037+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82038+{
82039+ if (file1 && file2) {
82040+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82041+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82042+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82043+ return true;
82044+ }
82045+
82046+ return false;
82047+}
82048+
82049+#define GR_CHROOT_CAPS {{ \
82050+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82051+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82052+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82053+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82054+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82055+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82056+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82057+
82058+#define security_learn(normal_msg,args...) \
82059+({ \
82060+ read_lock(&grsec_exec_file_lock); \
82061+ gr_add_learn_entry(normal_msg "\n", ## args); \
82062+ read_unlock(&grsec_exec_file_lock); \
82063+})
82064+
82065+enum {
82066+ GR_DO_AUDIT,
82067+ GR_DONT_AUDIT,
82068+ /* used for non-audit messages that we shouldn't kill the task on */
82069+ GR_DONT_AUDIT_GOOD
82070+};
82071+
82072+enum {
82073+ GR_TTYSNIFF,
82074+ GR_RBAC,
82075+ GR_RBAC_STR,
82076+ GR_STR_RBAC,
82077+ GR_RBAC_MODE2,
82078+ GR_RBAC_MODE3,
82079+ GR_FILENAME,
82080+ GR_SYSCTL_HIDDEN,
82081+ GR_NOARGS,
82082+ GR_ONE_INT,
82083+ GR_ONE_INT_TWO_STR,
82084+ GR_ONE_STR,
82085+ GR_STR_INT,
82086+ GR_TWO_STR_INT,
82087+ GR_TWO_INT,
82088+ GR_TWO_U64,
82089+ GR_THREE_INT,
82090+ GR_FIVE_INT_TWO_STR,
82091+ GR_TWO_STR,
82092+ GR_THREE_STR,
82093+ GR_FOUR_STR,
82094+ GR_STR_FILENAME,
82095+ GR_FILENAME_STR,
82096+ GR_FILENAME_TWO_INT,
82097+ GR_FILENAME_TWO_INT_STR,
82098+ GR_TEXTREL,
82099+ GR_PTRACE,
82100+ GR_RESOURCE,
82101+ GR_CAP,
82102+ GR_SIG,
82103+ GR_SIG2,
82104+ GR_CRASH1,
82105+ GR_CRASH2,
82106+ GR_PSACCT,
82107+ GR_RWXMAP,
82108+ GR_RWXMAPVMA
82109+};
82110+
82111+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82112+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82113+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82114+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82115+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82116+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82117+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82118+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82119+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82120+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82121+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82122+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82123+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82124+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82125+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82126+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82127+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82128+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82129+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82130+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82131+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82132+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82133+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82134+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82135+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82136+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82137+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82138+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82139+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82140+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82141+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82142+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82143+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82144+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82145+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82146+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82147+
82148+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82149+
82150+#endif
82151+
82152+#endif
82153diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82154new file mode 100644
82155index 0000000..26ef560
82156--- /dev/null
82157+++ b/include/linux/grmsg.h
82158@@ -0,0 +1,118 @@
82159+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82160+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82161+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82162+#define GR_STOPMOD_MSG "denied modification of module state by "
82163+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82164+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82165+#define GR_IOPERM_MSG "denied use of ioperm() by "
82166+#define GR_IOPL_MSG "denied use of iopl() by "
82167+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82168+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82169+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82170+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82171+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82172+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82173+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82174+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82175+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82176+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82177+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82178+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82179+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82180+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82181+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82182+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82183+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82184+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82185+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82186+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82187+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82188+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82189+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82190+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82191+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82192+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82193+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82194+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82195+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82196+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82197+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82198+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82199+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82200+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82201+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82202+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82203+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82204+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82205+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82206+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82207+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82208+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82209+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82210+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82211+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82212+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82213+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82214+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82215+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82216+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82217+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82218+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82219+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82220+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82221+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82222+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82223+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82224+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82225+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82226+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82227+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82228+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82229+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82230+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82231+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82232+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82233+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82234+#define GR_NICE_CHROOT_MSG "denied priority change by "
82235+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82236+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82237+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82238+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82239+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82240+#define GR_TIME_MSG "time set by "
82241+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82242+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82243+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82244+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82245+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82246+#define GR_BIND_MSG "denied bind() by "
82247+#define GR_CONNECT_MSG "denied connect() by "
82248+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82249+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82250+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82251+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82252+#define GR_CAP_ACL_MSG "use of %s denied for "
82253+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82254+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82255+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82256+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82257+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82258+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82259+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82260+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82261+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82262+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82263+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82264+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82265+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82266+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82267+#define GR_VM86_MSG "denied use of vm86 by "
82268+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82269+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82270+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82271+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82272+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82273+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82274+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82275+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82276+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82277diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82278new file mode 100644
82279index 0000000..63c1850
82280--- /dev/null
82281+++ b/include/linux/grsecurity.h
82282@@ -0,0 +1,250 @@
82283+#ifndef GR_SECURITY_H
82284+#define GR_SECURITY_H
82285+#include <linux/fs.h>
82286+#include <linux/fs_struct.h>
82287+#include <linux/binfmts.h>
82288+#include <linux/gracl.h>
82289+
82290+/* notify of brain-dead configs */
82291+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82292+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82293+#endif
82294+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82295+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82296+#endif
82297+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82298+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82299+#endif
82300+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82301+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82302+#endif
82303+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82304+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82305+#endif
82306+
82307+int gr_handle_new_usb(void);
82308+
82309+void gr_handle_brute_attach(int dumpable);
82310+void gr_handle_brute_check(void);
82311+void gr_handle_kernel_exploit(void);
82312+
82313+char gr_roletype_to_char(void);
82314+
82315+int gr_proc_is_restricted(void);
82316+
82317+int gr_acl_enable_at_secure(void);
82318+
82319+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82320+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82321+
82322+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82323+
82324+void gr_del_task_from_ip_table(struct task_struct *p);
82325+
82326+int gr_pid_is_chrooted(struct task_struct *p);
82327+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82328+int gr_handle_chroot_nice(void);
82329+int gr_handle_chroot_sysctl(const int op);
82330+int gr_handle_chroot_setpriority(struct task_struct *p,
82331+ const int niceval);
82332+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82333+int gr_chroot_fhandle(void);
82334+int gr_handle_chroot_chroot(const struct dentry *dentry,
82335+ const struct vfsmount *mnt);
82336+void gr_handle_chroot_chdir(const struct path *path);
82337+int gr_handle_chroot_chmod(const struct dentry *dentry,
82338+ const struct vfsmount *mnt, const int mode);
82339+int gr_handle_chroot_mknod(const struct dentry *dentry,
82340+ const struct vfsmount *mnt, const int mode);
82341+int gr_handle_chroot_mount(const struct dentry *dentry,
82342+ const struct vfsmount *mnt,
82343+ const char *dev_name);
82344+int gr_handle_chroot_pivot(void);
82345+int gr_handle_chroot_unix(const pid_t pid);
82346+
82347+int gr_handle_rawio(const struct inode *inode);
82348+
82349+void gr_handle_ioperm(void);
82350+void gr_handle_iopl(void);
82351+void gr_handle_msr_write(void);
82352+
82353+umode_t gr_acl_umask(void);
82354+
82355+int gr_tpe_allow(const struct file *file);
82356+
82357+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82358+void gr_clear_chroot_entries(struct task_struct *task);
82359+
82360+void gr_log_forkfail(const int retval);
82361+void gr_log_timechange(void);
82362+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82363+void gr_log_chdir(const struct dentry *dentry,
82364+ const struct vfsmount *mnt);
82365+void gr_log_chroot_exec(const struct dentry *dentry,
82366+ const struct vfsmount *mnt);
82367+void gr_log_remount(const char *devname, const int retval);
82368+void gr_log_unmount(const char *devname, const int retval);
82369+void gr_log_mount(const char *from, struct path *to, const int retval);
82370+void gr_log_textrel(struct vm_area_struct *vma);
82371+void gr_log_ptgnustack(struct file *file);
82372+void gr_log_rwxmmap(struct file *file);
82373+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82374+
82375+int gr_handle_follow_link(const struct inode *parent,
82376+ const struct inode *inode,
82377+ const struct dentry *dentry,
82378+ const struct vfsmount *mnt);
82379+int gr_handle_fifo(const struct dentry *dentry,
82380+ const struct vfsmount *mnt,
82381+ const struct dentry *dir, const int flag,
82382+ const int acc_mode);
82383+int gr_handle_hardlink(const struct dentry *dentry,
82384+ const struct vfsmount *mnt,
82385+ struct inode *inode,
82386+ const int mode, const struct filename *to);
82387+
82388+int gr_is_capable(const int cap);
82389+int gr_is_capable_nolog(const int cap);
82390+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82391+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82392+
82393+void gr_copy_label(struct task_struct *tsk);
82394+void gr_handle_crash(struct task_struct *task, const int sig);
82395+int gr_handle_signal(const struct task_struct *p, const int sig);
82396+int gr_check_crash_uid(const kuid_t uid);
82397+int gr_check_protected_task(const struct task_struct *task);
82398+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82399+int gr_acl_handle_mmap(const struct file *file,
82400+ const unsigned long prot);
82401+int gr_acl_handle_mprotect(const struct file *file,
82402+ const unsigned long prot);
82403+int gr_check_hidden_task(const struct task_struct *tsk);
82404+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82405+ const struct vfsmount *mnt);
82406+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82407+ const struct vfsmount *mnt);
82408+__u32 gr_acl_handle_access(const struct dentry *dentry,
82409+ const struct vfsmount *mnt, const int fmode);
82410+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82411+ const struct vfsmount *mnt, umode_t *mode);
82412+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82413+ const struct vfsmount *mnt);
82414+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82415+ const struct vfsmount *mnt);
82416+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82417+ const struct vfsmount *mnt);
82418+int gr_handle_ptrace(struct task_struct *task, const long request);
82419+int gr_handle_proc_ptrace(struct task_struct *task);
82420+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82421+ const struct vfsmount *mnt);
82422+int gr_check_crash_exec(const struct file *filp);
82423+int gr_acl_is_enabled(void);
82424+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82425+ const kgid_t gid);
82426+int gr_set_proc_label(const struct dentry *dentry,
82427+ const struct vfsmount *mnt,
82428+ const int unsafe_flags);
82429+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82430+ const struct vfsmount *mnt);
82431+__u32 gr_acl_handle_open(const struct dentry *dentry,
82432+ const struct vfsmount *mnt, int acc_mode);
82433+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82434+ const struct dentry *p_dentry,
82435+ const struct vfsmount *p_mnt,
82436+ int open_flags, int acc_mode, const int imode);
82437+void gr_handle_create(const struct dentry *dentry,
82438+ const struct vfsmount *mnt);
82439+void gr_handle_proc_create(const struct dentry *dentry,
82440+ const struct inode *inode);
82441+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82442+ const struct dentry *parent_dentry,
82443+ const struct vfsmount *parent_mnt,
82444+ const int mode);
82445+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82446+ const struct dentry *parent_dentry,
82447+ const struct vfsmount *parent_mnt);
82448+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82449+ const struct vfsmount *mnt);
82450+void gr_handle_delete(const u64 ino, const dev_t dev);
82451+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82452+ const struct vfsmount *mnt);
82453+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82454+ const struct dentry *parent_dentry,
82455+ const struct vfsmount *parent_mnt,
82456+ const struct filename *from);
82457+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82458+ const struct dentry *parent_dentry,
82459+ const struct vfsmount *parent_mnt,
82460+ const struct dentry *old_dentry,
82461+ const struct vfsmount *old_mnt, const struct filename *to);
82462+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82463+int gr_acl_handle_rename(struct dentry *new_dentry,
82464+ struct dentry *parent_dentry,
82465+ const struct vfsmount *parent_mnt,
82466+ struct dentry *old_dentry,
82467+ struct inode *old_parent_inode,
82468+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82469+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82470+ struct dentry *old_dentry,
82471+ struct dentry *new_dentry,
82472+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82473+__u32 gr_check_link(const struct dentry *new_dentry,
82474+ const struct dentry *parent_dentry,
82475+ const struct vfsmount *parent_mnt,
82476+ const struct dentry *old_dentry,
82477+ const struct vfsmount *old_mnt);
82478+int gr_acl_handle_filldir(const struct file *file, const char *name,
82479+ const unsigned int namelen, const u64 ino);
82480+
82481+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82482+ const struct vfsmount *mnt);
82483+void gr_acl_handle_exit(void);
82484+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82485+int gr_acl_handle_procpidmem(const struct task_struct *task);
82486+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82487+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82488+void gr_audit_ptrace(struct task_struct *task);
82489+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82490+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82491+void gr_put_exec_file(struct task_struct *task);
82492+
82493+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82494+
82495+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82496+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82497+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82498+ struct dentry *newdentry, struct vfsmount *newmnt);
82499+
82500+#ifdef CONFIG_GRKERNSEC_RESLOG
82501+extern void gr_log_resource(const struct task_struct *task, const int res,
82502+ const unsigned long wanted, const int gt);
82503+#else
82504+static inline void gr_log_resource(const struct task_struct *task, const int res,
82505+ const unsigned long wanted, const int gt)
82506+{
82507+}
82508+#endif
82509+
82510+#ifdef CONFIG_GRKERNSEC
82511+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82512+void gr_handle_vm86(void);
82513+void gr_handle_mem_readwrite(u64 from, u64 to);
82514+
82515+void gr_log_badprocpid(const char *entry);
82516+
82517+extern int grsec_enable_dmesg;
82518+extern int grsec_disable_privio;
82519+
82520+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82521+extern kgid_t grsec_proc_gid;
82522+#endif
82523+
82524+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82525+extern int grsec_enable_chroot_findtask;
82526+#endif
82527+#ifdef CONFIG_GRKERNSEC_SETXID
82528+extern int grsec_enable_setxid;
82529+#endif
82530+#endif
82531+
82532+#endif
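
The header above pairs every grsecurity hook with a compile-time guard: when a feature is disabled, callers still link against an empty inline stub (see gr_log_resource), so call sites need no #ifdefs of their own. A minimal sketch of that guard-plus-stub pattern; CONFIG_FOO and gr_handle_foo() are hypothetical names for illustration, not part of the patch:

/* Sketch of the stub pattern used throughout this header. */
#ifdef CONFIG_FOO
extern void gr_handle_foo(const struct task_struct *task);
#else
static inline void gr_handle_foo(const struct task_struct *task)
{
	/* compiled out: call sites stay #ifdef-free */
}
#endif
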
82533diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82534new file mode 100644
82535index 0000000..e7ffaaf
82536--- /dev/null
82537+++ b/include/linux/grsock.h
82538@@ -0,0 +1,19 @@
82539+#ifndef __GRSOCK_H
82540+#define __GRSOCK_H
82541+
82542+extern void gr_attach_curr_ip(const struct sock *sk);
82543+extern int gr_handle_sock_all(const int family, const int type,
82544+ const int protocol);
82545+extern int gr_handle_sock_server(const struct sockaddr *sck);
82546+extern int gr_handle_sock_server_other(const struct sock *sck);
82547+extern int gr_handle_sock_client(const struct sockaddr *sck);
82548+extern int gr_search_connect(struct socket * sock,
82549+ struct sockaddr_in * addr);
82550+extern int gr_search_bind(struct socket * sock,
82551+ struct sockaddr_in * addr);
82552+extern int gr_search_listen(struct socket * sock);
82553+extern int gr_search_accept(struct socket * sock);
82554+extern int gr_search_socket(const int domain, const int type,
82555+ const int protocol);
82556+
82557+#endif
82558diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82559index 9286a46..373f27f 100644
82560--- a/include/linux/highmem.h
82561+++ b/include/linux/highmem.h
82562@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82563 kunmap_atomic(kaddr);
82564 }
82565
82566+static inline void sanitize_highpage(struct page *page)
82567+{
82568+ void *kaddr;
82569+ unsigned long flags;
82570+
82571+ local_irq_save(flags);
82572+ kaddr = kmap_atomic(page);
82573+ clear_page(kaddr);
82574+ kunmap_atomic(kaddr);
82575+ local_irq_restore(flags);
82576+}
82577+
82578 static inline void zero_user_segments(struct page *page,
82579 unsigned start1, unsigned end1,
82580 unsigned start2, unsigned end2)
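
sanitize_highpage() zeroes a (possibly highmem) page through a temporary atomic kmap, with local interrupts disabled so the mapping slot cannot be reentered from IRQ context. A hedged sketch of a free path that scrubs pages before returning them to the allocator; the sanitize_pages() wrapper is illustrative, not from the patch:

#include <linux/highmem.h>

/* Illustrative wrapper: scrub pages so stale contents cannot leak
 * to the next owner of the memory. */
static void sanitize_pages(struct page *page, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		sanitize_highpage(page + i);
}
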
82581diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82582index 1c7b89a..7dda400 100644
82583--- a/include/linux/hwmon-sysfs.h
82584+++ b/include/linux/hwmon-sysfs.h
82585@@ -25,7 +25,8 @@
82586 struct sensor_device_attribute{
82587 struct device_attribute dev_attr;
82588 int index;
82589-};
82590+} __do_const;
82591+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82592 #define to_sensor_dev_attr(_dev_attr) \
82593 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82594
82595@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82596 struct device_attribute dev_attr;
82597 u8 index;
82598 u8 nr;
82599-};
82600+} __do_const;
82601+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82602 #define to_sensor_dev_attr_2(_dev_attr) \
82603 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82604
82605diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82606index 7c76959..153e597 100644
82607--- a/include/linux/i2c.h
82608+++ b/include/linux/i2c.h
82609@@ -413,6 +413,7 @@ struct i2c_algorithm {
82610 int (*unreg_slave)(struct i2c_client *client);
82611 #endif
82612 };
82613+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82614
82615 /**
82616 * struct i2c_bus_recovery_info - I2C bus recovery information
82617diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82618index d23c3c2..eb63c81 100644
82619--- a/include/linux/i2o.h
82620+++ b/include/linux/i2o.h
82621@@ -565,7 +565,7 @@ struct i2o_controller {
82622 struct i2o_device *exec; /* Executive */
82623 #if BITS_PER_LONG == 64
82624 spinlock_t context_list_lock; /* lock for context_list */
82625- atomic_t context_list_counter; /* needed for unique contexts */
82626+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82627 struct list_head context_list; /* list of context id's
82628 and pointers */
82629 #endif
82630diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82631index aff7ad8..3942bbd 100644
82632--- a/include/linux/if_pppox.h
82633+++ b/include/linux/if_pppox.h
82634@@ -76,7 +76,7 @@ struct pppox_proto {
82635 int (*ioctl)(struct socket *sock, unsigned int cmd,
82636 unsigned long arg);
82637 struct module *owner;
82638-};
82639+} __do_const;
82640
82641 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82642 extern void unregister_pppox_proto(int proto_num);
82643diff --git a/include/linux/init.h b/include/linux/init.h
82644index 2df8e8d..3e1280d 100644
82645--- a/include/linux/init.h
82646+++ b/include/linux/init.h
82647@@ -37,9 +37,17 @@
82648 * section.
82649 */
82650
82651+#define add_init_latent_entropy __latent_entropy
82652+
82653+#ifdef CONFIG_MEMORY_HOTPLUG
82654+#define add_meminit_latent_entropy
82655+#else
82656+#define add_meminit_latent_entropy __latent_entropy
82657+#endif
82658+
82659 /* These are for everybody (although not all archs will actually
82660 discard it in modules) */
82661-#define __init __section(.init.text) __cold notrace
82662+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82663 #define __initdata __section(.init.data)
82664 #define __initconst __constsection(.init.rodata)
82665 #define __exitdata __section(.exit.data)
82666@@ -100,7 +108,7 @@
82667 #define __cpuexitconst
82668
82669 /* Used for MEMORY_HOTPLUG */
82670-#define __meminit __section(.meminit.text) __cold notrace
82671+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82672 #define __meminitdata __section(.meminit.data)
82673 #define __meminitconst __constsection(.meminit.rodata)
82674 #define __memexit __section(.memexit.text) __exitused __cold notrace
82675diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82676index 3037fc0..c6527ce 100644
82677--- a/include/linux/init_task.h
82678+++ b/include/linux/init_task.h
82679@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82680
82681 #define INIT_TASK_COMM "swapper"
82682
82683+#ifdef CONFIG_X86
82684+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82685+#else
82686+#define INIT_TASK_THREAD_INFO
82687+#endif
82688+
82689 #ifdef CONFIG_RT_MUTEXES
82690 # define INIT_RT_MUTEXES(tsk) \
82691 .pi_waiters = RB_ROOT, \
82692@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82693 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82694 .comm = INIT_TASK_COMM, \
82695 .thread = INIT_THREAD, \
82696+ INIT_TASK_THREAD_INFO \
82697 .fs = &init_fs, \
82698 .files = &init_files, \
82699 .signal = &init_signals, \
82700diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82701index d9b05b5..e5f5b7b 100644
82702--- a/include/linux/interrupt.h
82703+++ b/include/linux/interrupt.h
82704@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82705
82706 struct softirq_action
82707 {
82708- void (*action)(struct softirq_action *);
82709-};
82710+ void (*action)(void);
82711+} __no_const;
82712
82713 asmlinkage void do_softirq(void);
82714 asmlinkage void __do_softirq(void);
82715@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82716 }
82717 #endif
82718
82719-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82720+extern void open_softirq(int nr, void (*action)(void));
82721 extern void softirq_init(void);
82722 extern void __raise_softirq_irqoff(unsigned int nr);
82723
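
The hunk above drops the unused struct softirq_action * argument from softirq handlers, so stale function pointers of the old type can no longer be passed in; handlers now take no arguments. A sketch of registering a handler under the patched signature (my_softirq_action is a placeholder):

#include <linux/interrupt.h>

/* Placeholder handler; the patched signature takes no arguments. */
static void my_softirq_action(void)
{
	/* bottom-half work runs here, in softirq context */
}

/* Illustrative only: real code would own a dedicated softirq slot
 * rather than re-registering TASKLET_SOFTIRQ. */
static void __init my_softirq_setup(void)
{
	open_softirq(TASKLET_SOFTIRQ, my_softirq_action);
}
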
82724diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82725index 38daa45..4de4317 100644
82726--- a/include/linux/iommu.h
82727+++ b/include/linux/iommu.h
82728@@ -147,7 +147,7 @@ struct iommu_ops {
82729
82730 unsigned long pgsize_bitmap;
82731 void *priv;
82732-};
82733+} __do_const;
82734
82735 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82736 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82737diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82738index 2c525022..345b106 100644
82739--- a/include/linux/ioport.h
82740+++ b/include/linux/ioport.h
82741@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82742 int adjust_resource(struct resource *res, resource_size_t start,
82743 resource_size_t size);
82744 resource_size_t resource_alignment(struct resource *res);
82745-static inline resource_size_t resource_size(const struct resource *res)
82746+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82747 {
82748 return res->end - res->start + 1;
82749 }
82750diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82751index 1eee6bc..9cf4912 100644
82752--- a/include/linux/ipc_namespace.h
82753+++ b/include/linux/ipc_namespace.h
82754@@ -60,7 +60,7 @@ struct ipc_namespace {
82755 struct user_namespace *user_ns;
82756
82757 struct ns_common ns;
82758-};
82759+} __randomize_layout;
82760
82761 extern struct ipc_namespace init_ipc_ns;
82762 extern atomic_t nr_ipc_ns;
82763diff --git a/include/linux/irq.h b/include/linux/irq.h
82764index d09ec7a..f373eb5 100644
82765--- a/include/linux/irq.h
82766+++ b/include/linux/irq.h
82767@@ -364,7 +364,8 @@ struct irq_chip {
82768 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82769
82770 unsigned long flags;
82771-};
82772+} __do_const;
82773+typedef struct irq_chip __no_const irq_chip_no_const;
82774
82775 /*
82776 * irq_chip specific flags
82777diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82778index 71d706d..817cdec 100644
82779--- a/include/linux/irqchip/arm-gic.h
82780+++ b/include/linux/irqchip/arm-gic.h
82781@@ -95,7 +95,7 @@
82782
82783 struct device_node;
82784
82785-extern struct irq_chip gic_arch_extn;
82786+extern irq_chip_no_const gic_arch_extn;
82787
82788 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82789 u32 offset, struct device_node *);
82790diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82791index faf433a..7dcb186 100644
82792--- a/include/linux/irqdesc.h
82793+++ b/include/linux/irqdesc.h
82794@@ -61,7 +61,7 @@ struct irq_desc {
82795 unsigned int irq_count; /* For detecting broken IRQs */
82796 unsigned long last_unhandled; /* Aging timer for unhandled count */
82797 unsigned int irqs_unhandled;
82798- atomic_t threads_handled;
82799+ atomic_unchecked_t threads_handled;
82800 int threads_handled_last;
82801 raw_spinlock_t lock;
82802 struct cpumask *percpu_enabled;
82803diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82804index c367cbd..c9b79e6 100644
82805--- a/include/linux/jiffies.h
82806+++ b/include/linux/jiffies.h
82807@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82808 /*
82809 * Convert various time units to each other:
82810 */
82811-extern unsigned int jiffies_to_msecs(const unsigned long j);
82812-extern unsigned int jiffies_to_usecs(const unsigned long j);
82813+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82814+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82815
82816-static inline u64 jiffies_to_nsecs(const unsigned long j)
82817+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82818 {
82819 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82820 }
82821
82822-extern unsigned long msecs_to_jiffies(const unsigned int m);
82823-extern unsigned long usecs_to_jiffies(const unsigned int u);
82824+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82825+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82826 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82827 extern void jiffies_to_timespec(const unsigned long jiffies,
82828- struct timespec *value);
82829-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82830+ struct timespec *value) __intentional_overflow(-1);
82831+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82832 extern void jiffies_to_timeval(const unsigned long jiffies,
82833 struct timeval *value);
82834
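
The __intentional_overflow(-1) markings tell the PaX size_overflow gcc plugin not to instrument these conversions: jiffies arithmetic is designed to wrap, and comparisons are done with time_after()/time_before() rather than raw operators. For example:

#include <linux/jiffies.h>

static void wait_100ms_sketch(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	/* Wrap-safe: time_after() compares via signed subtraction,
	 * so this stays correct even if jiffies overflows meanwhile. */
	while (!time_after(jiffies, timeout))
		cpu_relax();
}
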
82835diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82836index 6883e19..e854fcb 100644
82837--- a/include/linux/kallsyms.h
82838+++ b/include/linux/kallsyms.h
82839@@ -15,7 +15,8 @@
82840
82841 struct module;
82842
82843-#ifdef CONFIG_KALLSYMS
82844+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82845+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82846 /* Lookup the address for a symbol. Returns 0 if not found. */
82847 unsigned long kallsyms_lookup_name(const char *name);
82848
82849@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82850 /* Stupid that this does nothing, but I didn't create this mess. */
82851 #define __print_symbol(fmt, addr)
82852 #endif /*CONFIG_KALLSYMS*/
82853+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82854+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82855+extern unsigned long kallsyms_lookup_name(const char *name);
82856+extern void __print_symbol(const char *fmt, unsigned long address);
82857+extern int sprint_backtrace(char *buffer, unsigned long address);
82858+extern int sprint_symbol(char *buffer, unsigned long address);
82859+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82860+const char *kallsyms_lookup(unsigned long addr,
82861+ unsigned long *symbolsize,
82862+ unsigned long *offset,
82863+ char **modname, char *namebuf);
82864+extern int kallsyms_lookup_size_offset(unsigned long addr,
82865+ unsigned long *symbolsize,
82866+ unsigned long *offset);
82867+#endif
82868
82869 /* This macro allows us to keep printk typechecking */
82870 static __printf(1, 2)
82871diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82872index 64ce58b..6bcdbfa 100644
82873--- a/include/linux/kernel.h
82874+++ b/include/linux/kernel.h
82875@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82876 /* Obsolete, do not use. Use kstrto<foo> instead */
82877
82878 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82879-extern long simple_strtol(const char *,char **,unsigned int);
82880+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82881 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82882 extern long long simple_strtoll(const char *,char **,unsigned int);
82883
82884diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82885index ff9f1d3..6712be5 100644
82886--- a/include/linux/key-type.h
82887+++ b/include/linux/key-type.h
82888@@ -152,7 +152,7 @@ struct key_type {
82889 /* internal fields */
82890 struct list_head link; /* link in types list */
82891 struct lock_class_key lock_class; /* key->sem lock class */
82892-};
82893+} __do_const;
82894
82895 extern struct key_type key_type_keyring;
82896
82897diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82898index e465bb1..19f605fd 100644
82899--- a/include/linux/kgdb.h
82900+++ b/include/linux/kgdb.h
82901@@ -52,7 +52,7 @@ extern int kgdb_connected;
82902 extern int kgdb_io_module_registered;
82903
82904 extern atomic_t kgdb_setting_breakpoint;
82905-extern atomic_t kgdb_cpu_doing_single_step;
82906+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82907
82908 extern struct task_struct *kgdb_usethread;
82909 extern struct task_struct *kgdb_contthread;
82910@@ -254,7 +254,7 @@ struct kgdb_arch {
82911 void (*correct_hw_break)(void);
82912
82913 void (*enable_nmi)(bool on);
82914-};
82915+} __do_const;
82916
82917 /**
82918 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
82919@@ -279,7 +279,7 @@ struct kgdb_io {
82920 void (*pre_exception) (void);
82921 void (*post_exception) (void);
82922 int is_console;
82923-};
82924+} __do_const;
82925
82926 extern struct kgdb_arch arch_kgdb_ops;
82927
82928diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
82929index e705467..a92471d 100644
82930--- a/include/linux/kmemleak.h
82931+++ b/include/linux/kmemleak.h
82932@@ -27,7 +27,7 @@
82933
82934 extern void kmemleak_init(void) __ref;
82935 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82936- gfp_t gfp) __ref;
82937+ gfp_t gfp) __ref __size_overflow(2);
82938 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
82939 extern void kmemleak_free(const void *ptr) __ref;
82940 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
82941@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
82942 static inline void kmemleak_init(void)
82943 {
82944 }
82945-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82946+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
82947 gfp_t gfp)
82948 {
82949 }
82950diff --git a/include/linux/kmod.h b/include/linux/kmod.h
82951index 0555cc6..40116ce 100644
82952--- a/include/linux/kmod.h
82953+++ b/include/linux/kmod.h
82954@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
82955 * usually useless though. */
82956 extern __printf(2, 3)
82957 int __request_module(bool wait, const char *name, ...);
82958+extern __printf(3, 4)
82959+int ___request_module(bool wait, char *param_name, const char *name, ...);
82960 #define request_module(mod...) __request_module(true, mod)
82961 #define request_module_nowait(mod...) __request_module(false, mod)
82962 #define try_then_request_module(x, mod...) \
82963@@ -57,6 +59,9 @@ struct subprocess_info {
82964 struct work_struct work;
82965 struct completion *complete;
82966 char *path;
82967+#ifdef CONFIG_GRKERNSEC
82968+ char *origpath;
82969+#endif
82970 char **argv;
82971 char **envp;
82972 int wait;
82973diff --git a/include/linux/kobject.h b/include/linux/kobject.h
82974index 2d61b90..a1d0a13 100644
82975--- a/include/linux/kobject.h
82976+++ b/include/linux/kobject.h
82977@@ -118,7 +118,7 @@ struct kobj_type {
82978 struct attribute **default_attrs;
82979 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
82980 const void *(*namespace)(struct kobject *kobj);
82981-};
82982+} __do_const;
82983
82984 struct kobj_uevent_env {
82985 char *argv[3];
82986@@ -142,6 +142,7 @@ struct kobj_attribute {
82987 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
82988 const char *buf, size_t count);
82989 };
82990+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
82991
82992 extern const struct sysfs_ops kobj_sysfs_ops;
82993
82994@@ -169,7 +170,7 @@ struct kset {
82995 spinlock_t list_lock;
82996 struct kobject kobj;
82997 const struct kset_uevent_ops *uevent_ops;
82998-};
82999+} __randomize_layout;
83000
83001 extern void kset_init(struct kset *kset);
83002 extern int __must_check kset_register(struct kset *kset);
83003diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83004index df32d25..fb52e27 100644
83005--- a/include/linux/kobject_ns.h
83006+++ b/include/linux/kobject_ns.h
83007@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83008 const void *(*netlink_ns)(struct sock *sk);
83009 const void *(*initial_ns)(void);
83010 void (*drop_ns)(void *);
83011-};
83012+} __do_const;
83013
83014 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83015 int kobj_ns_type_registered(enum kobj_ns_type type);
83016diff --git a/include/linux/kref.h b/include/linux/kref.h
83017index 484604d..0f6c5b6 100644
83018--- a/include/linux/kref.h
83019+++ b/include/linux/kref.h
83020@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83021 static inline int kref_sub(struct kref *kref, unsigned int count,
83022 void (*release)(struct kref *kref))
83023 {
83024- WARN_ON(release == NULL);
83025+ BUG_ON(release == NULL);
83026
83027 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83028 release(kref);
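
Upgrading WARN_ON to BUG_ON makes a NULL release function fatal instead of merely logged: dropping the final reference with no destructor silently leaks the object. Callers are unchanged; a sketch with a hypothetical my_obj type:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {			/* hypothetical, for illustration */
	struct kref ref;
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_obj, ref));
}

static void obj_put(struct my_obj *obj)
{
	/* kref_put() is kref_sub(kref, 1, release); with the hunk
	 * above, a NULL release is now a fatal BUG, not a warning. */
	kref_put(&obj->ref, obj_release);
}
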
83029diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83030index 26f1060..bafc04a 100644
83031--- a/include/linux/kvm_host.h
83032+++ b/include/linux/kvm_host.h
83033@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
83034 {
83035 }
83036 #endif
83037-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83038+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83039 struct module *module);
83040 void kvm_exit(void);
83041
83042@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83043 struct kvm_guest_debug *dbg);
83044 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83045
83046-int kvm_arch_init(void *opaque);
83047+int kvm_arch_init(const void *opaque);
83048 void kvm_arch_exit(void);
83049
83050 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83051diff --git a/include/linux/libata.h b/include/linux/libata.h
83052index 91f705d..24be831 100644
83053--- a/include/linux/libata.h
83054+++ b/include/linux/libata.h
83055@@ -979,7 +979,7 @@ struct ata_port_operations {
83056 * fields must be pointers.
83057 */
83058 const struct ata_port_operations *inherits;
83059-};
83060+} __do_const;
83061
83062 struct ata_port_info {
83063 unsigned long flags;
83064diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83065index a6a42dd..6c5ebce 100644
83066--- a/include/linux/linkage.h
83067+++ b/include/linux/linkage.h
83068@@ -36,6 +36,7 @@
83069 #endif
83070
83071 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83072+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83073 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83074
83075 /*
83076diff --git a/include/linux/list.h b/include/linux/list.h
83077index feb773c..98f3075 100644
83078--- a/include/linux/list.h
83079+++ b/include/linux/list.h
83080@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
83081 extern void list_del(struct list_head *entry);
83082 #endif
83083
83084+extern void __pax_list_add(struct list_head *new,
83085+ struct list_head *prev,
83086+ struct list_head *next);
83087+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83088+{
83089+ __pax_list_add(new, head, head->next);
83090+}
83091+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83092+{
83093+ __pax_list_add(new, head->prev, head);
83094+}
83095+extern void pax_list_del(struct list_head *entry);
83096+
83097 /**
83098 * list_replace - replace old entry by new one
83099 * @old : the element to be replaced
83100@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83101 INIT_LIST_HEAD(entry);
83102 }
83103
83104+extern void pax_list_del_init(struct list_head *entry);
83105+
83106 /**
83107 * list_move - delete from one list and add as another's head
83108 * @list: the entry to move
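
The pax_list_* helpers mirror list_add()/list_del() but are implemented out of line so they can temporarily lift KERNEXEC read-only protection when list nodes live in write-protected data. A hedged sketch of the intended call pattern, assuming a node that sits in otherwise read-only memory:

#include <linux/list.h>

/* Hypothetical node living in data KERNEXEC keeps read-only;
 * plain list_add() would fault on the write-protected pointers. */
static struct list_head my_entry;

static void my_register(struct list_head *head)
{
	pax_list_add(&my_entry, head);
}

static void my_unregister(void)
{
	pax_list_del(&my_entry);
}
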
83109diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83110index 4bfde0e..d6e2e09 100644
83111--- a/include/linux/lockref.h
83112+++ b/include/linux/lockref.h
83113@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83114 return ((int)l->count < 0);
83115 }
83116
83117+static inline unsigned int __lockref_read(struct lockref *lockref)
83118+{
83119+ return lockref->count;
83120+}
83121+
83122+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83123+{
83124+ lockref->count = count;
83125+}
83126+
83127+static inline void __lockref_inc(struct lockref *lockref)
83128+{
83129+
83130+#ifdef CONFIG_PAX_REFCOUNT
83131+ atomic_inc((atomic_t *)&lockref->count);
83132+#else
83133+ lockref->count++;
83134+#endif
83135+
83136+}
83137+
83138+static inline void __lockref_dec(struct lockref *lockref)
83139+{
83140+
83141+#ifdef CONFIG_PAX_REFCOUNT
83142+ atomic_dec((atomic_t *)&lockref->count);
83143+#else
83144+ lockref->count--;
83145+#endif
83146+
83147+}
83148+
83149 #endif /* __LINUX_LOCKREF_H */
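
The __lockref_read/set/inc/dec accessors wrap all direct manipulation of lockref->count so that, under CONFIG_PAX_REFCOUNT, updates go through the overflow-checked atomic_t path; without it they compile down to the plain increments they replace. Call sites swap patterns like this (sketch):

static void get_ref(struct lockref *lockref)
{
	/* was: lockref->count++;  (uninstrumented) */
	__lockref_inc(lockref);	/* checked under CONFIG_PAX_REFCOUNT */
}
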
83150diff --git a/include/linux/math64.h b/include/linux/math64.h
83151index c45c089..298841c 100644
83152--- a/include/linux/math64.h
83153+++ b/include/linux/math64.h
83154@@ -15,7 +15,7 @@
83155 * This is commonly provided by 32bit archs to provide an optimized 64bit
83156 * divide.
83157 */
83158-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83159+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83160 {
83161 *remainder = dividend % divisor;
83162 return dividend / divisor;
83163@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83164 /**
83165 * div64_u64 - unsigned 64bit divide with 64bit divisor
83166 */
83167-static inline u64 div64_u64(u64 dividend, u64 divisor)
83168+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83169 {
83170 return dividend / divisor;
83171 }
83172@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83173 #define div64_ul(x, y) div_u64((x), (y))
83174
83175 #ifndef div_u64_rem
83176-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83177+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83178 {
83179 *remainder = do_div(dividend, divisor);
83180 return dividend;
83181@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83182 #endif
83183
83184 #ifndef div64_u64
83185-extern u64 div64_u64(u64 dividend, u64 divisor);
83186+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83187 #endif
83188
83189 #ifndef div64_s64
83190@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83191 * divide.
83192 */
83193 #ifndef div_u64
83194-static inline u64 div_u64(u64 dividend, u32 divisor)
83195+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83196 {
83197 u32 remainder;
83198 return div_u64_rem(dividend, divisor, &remainder);
83199diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83200index 3d385c8..deacb6a 100644
83201--- a/include/linux/mempolicy.h
83202+++ b/include/linux/mempolicy.h
83203@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83204 }
83205
83206 #define vma_policy(vma) ((vma)->vm_policy)
83207+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83208+{
83209+ vma->vm_policy = pol;
83210+}
83211
83212 static inline void mpol_get(struct mempolicy *pol)
83213 {
83214@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83215 }
83216
83217 #define vma_policy(vma) NULL
83218+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83219+{
83220+}
83221
83222 static inline int
83223 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
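
set_vma_policy() gives writers of vma->vm_policy a single accessor that compiles to nothing when CONFIG_NUMA is off, so callers (such as the PaX vma-mirroring code elsewhere in this patch) need no #ifdef. A sketch of copying one vma's policy onto another; mirror_policy() is a hypothetical helper:

#include <linux/err.h>
#include <linux/mempolicy.h>
#include <linux/mm_types.h>

/* Under !CONFIG_NUMA every call below collapses to a no-op. */
static int mirror_policy(struct vm_area_struct *dst,
			 struct vm_area_struct *src)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	set_vma_policy(dst, pol);
	return 0;
}
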
83224diff --git a/include/linux/mm.h b/include/linux/mm.h
83225index dd5ea30..cf81cd1 100644
83226--- a/include/linux/mm.h
83227+++ b/include/linux/mm.h
83228@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83229
83230 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83231 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83232+
83233+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83234+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83235+#endif
83236+
83237 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83238 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83239 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83240@@ -256,8 +261,8 @@ struct vm_operations_struct {
83241 /* called by access_process_vm when get_user_pages() fails, typically
83242 * for use by special VMAs that can switch between memory and hardware
83243 */
83244- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83245- void *buf, int len, int write);
83246+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83247+ void *buf, size_t len, int write);
83248
83249 /* Called by the /proc/PID/maps code to ask the vma whether it
83250 * has a special name. Returning non-NULL will also cause this
83251@@ -291,6 +296,7 @@ struct vm_operations_struct {
83252 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83253 unsigned long size, pgoff_t pgoff);
83254 };
83255+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83256
83257 struct mmu_gather;
83258 struct inode;
83259@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83260 unsigned long *pfn);
83261 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83262 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83263-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83264- void *buf, int len, int write);
83265+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83266+ void *buf, size_t len, int write);
83267
83268 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83269 loff_t const holebegin, loff_t const holelen)
83270@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83271 }
83272 #endif
83273
83274-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83275-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83276- void *buf, int len, int write);
83277+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83278+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83279+ void *buf, size_t len, int write);
83280
83281 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83282 unsigned long start, unsigned long nr_pages,
83283@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83284 int clear_page_dirty_for_io(struct page *page);
83285 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83286
83287-/* Is the vma a continuation of the stack vma above it? */
83288-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83289-{
83290- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83291-}
83292-
83293-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83294- unsigned long addr)
83295-{
83296- return (vma->vm_flags & VM_GROWSDOWN) &&
83297- (vma->vm_start == addr) &&
83298- !vma_growsdown(vma->vm_prev, addr);
83299-}
83300-
83301-/* Is the vma a continuation of the stack vma below it? */
83302-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83303-{
83304- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83305-}
83306-
83307-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83308- unsigned long addr)
83309-{
83310- return (vma->vm_flags & VM_GROWSUP) &&
83311- (vma->vm_end == addr) &&
83312- !vma_growsup(vma->vm_next, addr);
83313-}
83314-
83315 extern struct task_struct *task_of_stack(struct task_struct *task,
83316 struct vm_area_struct *vma, bool in_group);
83317
83318@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83319 {
83320 return 0;
83321 }
83322+
83323+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83324+ unsigned long address)
83325+{
83326+ return 0;
83327+}
83328 #else
83329 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83330+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83331 #endif
83332
83333 #ifdef __PAGETABLE_PMD_FOLDED
83334@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83335 {
83336 return 0;
83337 }
83338+
83339+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83340+ unsigned long address)
83341+{
83342+ return 0;
83343+}
83344 #else
83345 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83346+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83347 #endif
83348
83349 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83350@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83351 NULL: pud_offset(pgd, address);
83352 }
83353
83354+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83355+{
83356+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83357+ NULL: pud_offset(pgd, address);
83358+}
83359+
83360 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83361 {
83362 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83363 NULL: pmd_offset(pud, address);
83364 }
83365+
83366+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83367+{
83368+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83369+ NULL: pmd_offset(pud, address);
83370+}
83371 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83372
83373 #if USE_SPLIT_PTE_PTLOCKS
83374@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83375 bool *need_rmap_locks);
83376 extern void exit_mmap(struct mm_struct *);
83377
83378+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83379+extern void gr_learn_resource(const struct task_struct *task, const int res,
83380+ const unsigned long wanted, const int gt);
83381+#else
83382+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83383+ const unsigned long wanted, const int gt)
83384+{
83385+}
83386+#endif
83387+
83388 static inline int check_data_rlimit(unsigned long rlim,
83389 unsigned long new,
83390 unsigned long start,
83391 unsigned long end_data,
83392 unsigned long start_data)
83393 {
83394+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83395 if (rlim < RLIM_INFINITY) {
83396 if (((new - start) + (end_data - start_data)) > rlim)
83397 return -ENOSPC;
83398@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83399 unsigned long addr, unsigned long len,
83400 unsigned long flags, struct page **pages);
83401
83402-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83403+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83404
83405 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83406 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83407@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83408 unsigned long len, unsigned long prot, unsigned long flags,
83409 unsigned long pgoff, unsigned long *populate);
83410 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83411+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83412
83413 #ifdef CONFIG_MMU
83414 extern int __mm_populate(unsigned long addr, unsigned long len,
83415@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83416 unsigned long high_limit;
83417 unsigned long align_mask;
83418 unsigned long align_offset;
83419+ unsigned long threadstack_offset;
83420 };
83421
83422-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83423-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83424+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83425+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83426
83427 /*
83428 * Search for an unmapped address range.
83429@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83430 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83431 */
83432 static inline unsigned long
83433-vm_unmapped_area(struct vm_unmapped_area_info *info)
83434+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83435 {
83436 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83437 return unmapped_area(info);
83438@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83439 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83440 struct vm_area_struct **pprev);
83441
83442+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83443+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83444+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83445+
83446 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83447 NULL if none. Assume start_addr < end_addr. */
83448 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83449@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83450 }
83451
83452 #ifdef CONFIG_MMU
83453-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83454+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83455 void vma_set_page_prot(struct vm_area_struct *vma);
83456 #else
83457-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83458+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83459 {
83460 return __pgprot(0);
83461 }
83462@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83463 static inline void vm_stat_account(struct mm_struct *mm,
83464 unsigned long flags, struct file *file, long pages)
83465 {
83466+
83467+#ifdef CONFIG_PAX_RANDMMAP
83468+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83469+#endif
83470+
83471 mm->total_vm += pages;
83472 }
83473 #endif /* CONFIG_PROC_FS */
83474@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83475 extern int sysctl_memory_failure_early_kill;
83476 extern int sysctl_memory_failure_recovery;
83477 extern void shake_page(struct page *p, int access);
83478-extern atomic_long_t num_poisoned_pages;
83479+extern atomic_long_unchecked_t num_poisoned_pages;
83480 extern int soft_offline_page(struct page *page, int flags);
83481
83482 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83483@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83484 static inline void setup_nr_node_ids(void) {}
83485 #endif
83486
83487+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83488+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83489+#else
83490+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83491+#endif
83492+
83493 #endif /* __KERNEL__ */
83494 #endif /* _LINUX_MM_H */
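
Among the mm.h changes, struct vm_unmapped_area_info grows a threadstack_offset field and the unmapped_area helpers take the struct as const, so arch get_unmapped_area implementations can request an extra gap near thread stacks without the search mutating the request. A sketch of filling the extended structure; the values and the offset parameter are illustrative:

static unsigned long sketch_get_unmapped_area(struct mm_struct *mm,
					      unsigned long len,
					      unsigned long offset)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;
	info.threadstack_offset = offset;	/* gap kept from stacks */

	return vm_unmapped_area(&info);
}
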
83495diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83496index 6d34aa2..d73d848 100644
83497--- a/include/linux/mm_types.h
83498+++ b/include/linux/mm_types.h
83499@@ -309,7 +309,9 @@ struct vm_area_struct {
83500 #ifdef CONFIG_NUMA
83501 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83502 #endif
83503-};
83504+
83505+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83506+} __randomize_layout;
83507
83508 struct core_thread {
83509 struct task_struct *task;
83510@@ -459,7 +461,25 @@ struct mm_struct {
83511 /* address of the bounds directory */
83512 void __user *bd_addr;
83513 #endif
83514-};
83515+
83516+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83517+ unsigned long pax_flags;
83518+#endif
83519+
83520+#ifdef CONFIG_PAX_DLRESOLVE
83521+ unsigned long call_dl_resolve;
83522+#endif
83523+
83524+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83525+ unsigned long call_syscall;
83526+#endif
83527+
83528+#ifdef CONFIG_PAX_ASLR
83529+ unsigned long delta_mmap; /* randomized offset */
83530+ unsigned long delta_stack; /* randomized offset */
83531+#endif
83532+
83533+} __randomize_layout;
83534
83535 static inline void mm_init_cpumask(struct mm_struct *mm)
83536 {
83537diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83538index c5d5278..f0b68c8 100644
83539--- a/include/linux/mmiotrace.h
83540+++ b/include/linux/mmiotrace.h
83541@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83542 /* Called from ioremap.c */
83543 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83544 void __iomem *addr);
83545-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83546+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83547
83548 /* For anyone to insert markers. Remember trailing newline. */
83549 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83550@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83551 {
83552 }
83553
83554-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83555+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83556 {
83557 }
83558
83559diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83560index 2f0856d..5a4bc1e 100644
83561--- a/include/linux/mmzone.h
83562+++ b/include/linux/mmzone.h
83563@@ -527,7 +527,7 @@ struct zone {
83564
83565 ZONE_PADDING(_pad3_)
83566 /* Zone statistics */
83567- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83568+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83569 } ____cacheline_internodealigned_in_smp;
83570
83571 enum zone_flags {
83572diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83573index 745def8..08a820b 100644
83574--- a/include/linux/mod_devicetable.h
83575+++ b/include/linux/mod_devicetable.h
83576@@ -139,7 +139,7 @@ struct usb_device_id {
83577 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83578 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83579
83580-#define HID_ANY_ID (~0)
83581+#define HID_ANY_ID (~0U)
83582 #define HID_BUS_ANY 0xffff
83583 #define HID_GROUP_ANY 0x0000
83584
83585@@ -475,7 +475,7 @@ struct dmi_system_id {
83586 const char *ident;
83587 struct dmi_strmatch matches[4];
83588 void *driver_data;
83589-};
83590+} __do_const;
83591 /*
83592 * struct dmi_device_id appears during expansion of
83593 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83594diff --git a/include/linux/module.h b/include/linux/module.h
83595index b653d7c..22a238f 100644
83596--- a/include/linux/module.h
83597+++ b/include/linux/module.h
83598@@ -17,9 +17,11 @@
83599 #include <linux/moduleparam.h>
83600 #include <linux/jump_label.h>
83601 #include <linux/export.h>
83602+#include <linux/fs.h>
83603
83604 #include <linux/percpu.h>
83605 #include <asm/module.h>
83606+#include <asm/pgtable.h>
83607
83608 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83609 #define MODULE_SIG_STRING "~Module signature appended~\n"
83610@@ -42,7 +44,7 @@ struct module_kobject {
83611 struct kobject *drivers_dir;
83612 struct module_param_attrs *mp;
83613 struct completion *kobj_completion;
83614-};
83615+} __randomize_layout;
83616
83617 struct module_attribute {
83618 struct attribute attr;
83619@@ -54,12 +56,13 @@ struct module_attribute {
83620 int (*test)(struct module *);
83621 void (*free)(struct module *);
83622 };
83623+typedef struct module_attribute __no_const module_attribute_no_const;
83624
83625 struct module_version_attribute {
83626 struct module_attribute mattr;
83627 const char *module_name;
83628 const char *version;
83629-} __attribute__ ((__aligned__(sizeof(void *))));
83630+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83631
83632 extern ssize_t __modver_version_show(struct module_attribute *,
83633 struct module_kobject *, char *);
83634@@ -221,7 +224,7 @@ struct module {
83635
83636 /* Sysfs stuff. */
83637 struct module_kobject mkobj;
83638- struct module_attribute *modinfo_attrs;
83639+ module_attribute_no_const *modinfo_attrs;
83640 const char *version;
83641 const char *srcversion;
83642 struct kobject *holders_dir;
83643@@ -270,19 +273,16 @@ struct module {
83644 int (*init)(void);
83645
83646 /* If this is non-NULL, vfree after init() returns */
83647- void *module_init;
83648+ void *module_init_rx, *module_init_rw;
83649
83650 /* Here is the actual code + data, vfree'd on unload. */
83651- void *module_core;
83652+ void *module_core_rx, *module_core_rw;
83653
83654 /* Here are the sizes of the init and core sections */
83655- unsigned int init_size, core_size;
83656+ unsigned int init_size_rw, core_size_rw;
83657
83658 /* The size of the executable code in each section. */
83659- unsigned int init_text_size, core_text_size;
83660-
83661- /* Size of RO sections of the module (text+rodata) */
83662- unsigned int init_ro_size, core_ro_size;
83663+ unsigned int init_size_rx, core_size_rx;
83664
83665 /* Arch-specific module values */
83666 struct mod_arch_specific arch;
83667@@ -338,6 +338,10 @@ struct module {
83668 #ifdef CONFIG_EVENT_TRACING
83669 struct ftrace_event_call **trace_events;
83670 unsigned int num_trace_events;
83671+ struct file_operations trace_id;
83672+ struct file_operations trace_enable;
83673+ struct file_operations trace_format;
83674+ struct file_operations trace_filter;
83675 #endif
83676 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83677 unsigned int num_ftrace_callsites;
83678@@ -361,7 +365,7 @@ struct module {
83679 ctor_fn_t *ctors;
83680 unsigned int num_ctors;
83681 #endif
83682-};
83683+} __randomize_layout;
83684 #ifndef MODULE_ARCH_INIT
83685 #define MODULE_ARCH_INIT {}
83686 #endif
83687@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83688 bool is_module_percpu_address(unsigned long addr);
83689 bool is_module_text_address(unsigned long addr);
83690
83691+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83692+{
83693+
83694+#ifdef CONFIG_PAX_KERNEXEC
83695+ if (ktla_ktva(addr) >= (unsigned long)start &&
83696+ ktla_ktva(addr) < (unsigned long)start + size)
83697+ return 1;
83698+#endif
83699+
83700+ return ((void *)addr >= start && (void *)addr < start + size);
83701+}
83702+
83703+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83704+{
83705+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83706+}
83707+
83708+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83709+{
83710+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83711+}
83712+
83713+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83714+{
83715+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83716+}
83717+
83718+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83719+{
83720+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83721+}
83722+
83723 static inline bool within_module_core(unsigned long addr,
83724 const struct module *mod)
83725 {
83726- return (unsigned long)mod->module_core <= addr &&
83727- addr < (unsigned long)mod->module_core + mod->core_size;
83728+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83729 }
83730
83731 static inline bool within_module_init(unsigned long addr,
83732 const struct module *mod)
83733 {
83734- return (unsigned long)mod->module_init <= addr &&
83735- addr < (unsigned long)mod->module_init + mod->init_size;
83736+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83737 }
83738
83739 static inline bool within_module(unsigned long addr, const struct module *mod)
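
Splitting module_core/module_init into _rx/_rw pairs lets KERNEXEC map module code read-execute and data read-write in disjoint regions; within_module_range() also compares through ktla_ktva() so kernel-text aliases on x86 still resolve. A sketch of how the split can tighten address checks; the helper name is illustrative:

/* Only the RX halves are legitimate code addresses; data pointers
 * must fall in the RW halves. */
static bool module_text_address_sketch(unsigned long addr,
				       const struct module *mod)
{
	return within_module_core_rx(addr, mod) ||
	       within_module_init_rx(addr, mod);
}
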
83740diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83741index f755626..641f822 100644
83742--- a/include/linux/moduleloader.h
83743+++ b/include/linux/moduleloader.h
83744@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83745 sections. Returns NULL on failure. */
83746 void *module_alloc(unsigned long size);
83747
83748+#ifdef CONFIG_PAX_KERNEXEC
83749+void *module_alloc_exec(unsigned long size);
83750+#else
83751+#define module_alloc_exec(x) module_alloc(x)
83752+#endif
83753+
83754 /* Free memory returned from module_alloc. */
83755 void module_memfree(void *module_region);
83756
83757+#ifdef CONFIG_PAX_KERNEXEC
83758+void module_memfree_exec(void *module_region);
83759+#else
83760+#define module_memfree_exec(x) module_memfree((x))
83761+#endif
83762+
83763 /*
83764 * Apply the given relocation to the (simplified) ELF. Return -error
83765 * or 0.
83766@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83767 unsigned int relsec,
83768 struct module *me)
83769 {
83770+#ifdef CONFIG_MODULES
83771 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83772 module_name(me));
83773+#endif
83774 return -ENOEXEC;
83775 }
83776 #endif
83777@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83778 unsigned int relsec,
83779 struct module *me)
83780 {
83781+#ifdef CONFIG_MODULES
83782 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83783 module_name(me));
83784+#endif
83785 return -ENOEXEC;
83786 }
83787 #endif
83788diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83789index 1c9effa..1160bdd 100644
83790--- a/include/linux/moduleparam.h
83791+++ b/include/linux/moduleparam.h
83792@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83793 * @len is usually just sizeof(string).
83794 */
83795 #define module_param_string(name, string, len, perm) \
83796- static const struct kparam_string __param_string_##name \
83797+ static const struct kparam_string __param_string_##name __used \
83798 = { len, string }; \
83799 __module_param_call(MODULE_PARAM_PREFIX, name, \
83800 &param_ops_string, \
83801@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83802 */
83803 #define module_param_array_named(name, array, type, nump, perm) \
83804 param_check_##type(name, &(array)[0]); \
83805- static const struct kparam_array __param_arr_##name \
83806+ static const struct kparam_array __param_arr_##name __used \
83807 = { .max = ARRAY_SIZE(array), .num = nump, \
83808 .ops = &param_ops_##type, \
83809 .elemsize = sizeof(array[0]), .elem = array }; \
83810diff --git a/include/linux/mount.h b/include/linux/mount.h
83811index c2c561d..a5f2a8c 100644
83812--- a/include/linux/mount.h
83813+++ b/include/linux/mount.h
83814@@ -66,7 +66,7 @@ struct vfsmount {
83815 struct dentry *mnt_root; /* root of the mounted tree */
83816 struct super_block *mnt_sb; /* pointer to superblock */
83817 int mnt_flags;
83818-};
83819+} __randomize_layout;
83820
83821 struct file; /* forward dec */
83822 struct path;
83823diff --git a/include/linux/namei.h b/include/linux/namei.h
83824index c899077..b9a2010 100644
83825--- a/include/linux/namei.h
83826+++ b/include/linux/namei.h
83827@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83828 extern void unlock_rename(struct dentry *, struct dentry *);
83829
83830 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83831-extern void nd_set_link(struct nameidata *nd, char *path);
83832-extern char *nd_get_link(struct nameidata *nd);
83833+extern void nd_set_link(struct nameidata *nd, const char *path);
83834+extern const char *nd_get_link(const struct nameidata *nd);
83835
83836 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83837 {
83838diff --git a/include/linux/net.h b/include/linux/net.h
83839index 17d8339..81656c0 100644
83840--- a/include/linux/net.h
83841+++ b/include/linux/net.h
83842@@ -192,7 +192,7 @@ struct net_proto_family {
83843 int (*create)(struct net *net, struct socket *sock,
83844 int protocol, int kern);
83845 struct module *owner;
83846-};
83847+} __do_const;
83848
83849 struct iovec;
83850 struct kvec;
83851diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83852index 52fd8e8..19430a1 100644
83853--- a/include/linux/netdevice.h
83854+++ b/include/linux/netdevice.h
83855@@ -1191,6 +1191,7 @@ struct net_device_ops {
83856 u8 state);
83857 #endif
83858 };
83859+typedef struct net_device_ops __no_const net_device_ops_no_const;
83860
83861 /**
83862 * enum net_device_priv_flags - &struct net_device priv_flags
83863@@ -1537,10 +1538,10 @@ struct net_device {
83864
83865 struct net_device_stats stats;
83866
83867- atomic_long_t rx_dropped;
83868- atomic_long_t tx_dropped;
83869+ atomic_long_unchecked_t rx_dropped;
83870+ atomic_long_unchecked_t tx_dropped;
83871
83872- atomic_t carrier_changes;
83873+ atomic_unchecked_t carrier_changes;
83874
83875 #ifdef CONFIG_WIRELESS_EXT
83876 const struct iw_handler_def * wireless_handlers;
83877diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83878index 2517ece..0bbfcfb 100644
83879--- a/include/linux/netfilter.h
83880+++ b/include/linux/netfilter.h
83881@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83882 #endif
83883 /* Use the module struct to lock set/get code in place */
83884 struct module *owner;
83885-};
83886+} __do_const;
83887
83888 /* Function to register/unregister hook points. */
83889 int nf_register_hook(struct nf_hook_ops *reg);
83890diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83891index e955d47..04a5338 100644
83892--- a/include/linux/netfilter/nfnetlink.h
83893+++ b/include/linux/netfilter/nfnetlink.h
83894@@ -19,7 +19,7 @@ struct nfnl_callback {
83895 const struct nlattr * const cda[]);
83896 const struct nla_policy *policy; /* netlink attribute policy */
83897 const u_int16_t attr_count; /* number of nlattr's */
83898-};
83899+} __do_const;
83900
83901 struct nfnetlink_subsystem {
83902 const char *name;
83903diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83904new file mode 100644
83905index 0000000..33f4af8
83906--- /dev/null
83907+++ b/include/linux/netfilter/xt_gradm.h
83908@@ -0,0 +1,9 @@
83909+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83910+#define _LINUX_NETFILTER_XT_GRADM_H 1
83911+
83912+struct xt_gradm_mtinfo {
83913+ __u16 flags;
83914+ __u16 invflags;
83915+};
83916+
83917+#endif
83918diff --git a/include/linux/nls.h b/include/linux/nls.h
83919index 520681b..2b7fabb 100644
83920--- a/include/linux/nls.h
83921+++ b/include/linux/nls.h
83922@@ -31,7 +31,7 @@ struct nls_table {
83923 const unsigned char *charset2upper;
83924 struct module *owner;
83925 struct nls_table *next;
83926-};
83927+} __do_const;
83928
83929 /* this value hold the maximum octet of charset */
83930 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
83931@@ -46,7 +46,7 @@ enum utf16_endian {
83932 /* nls_base.c */
83933 extern int __register_nls(struct nls_table *, struct module *);
83934 extern int unregister_nls(struct nls_table *);
83935-extern struct nls_table *load_nls(char *);
83936+extern struct nls_table *load_nls(const char *);
83937 extern void unload_nls(struct nls_table *);
83938 extern struct nls_table *load_nls_default(void);
83939 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
83940diff --git a/include/linux/notifier.h b/include/linux/notifier.h
83941index d14a4c3..a078786 100644
83942--- a/include/linux/notifier.h
83943+++ b/include/linux/notifier.h
83944@@ -54,7 +54,8 @@ struct notifier_block {
83945 notifier_fn_t notifier_call;
83946 struct notifier_block __rcu *next;
83947 int priority;
83948-};
83949+} __do_const;
83950+typedef struct notifier_block __no_const notifier_block_no_const;
83951
83952 struct atomic_notifier_head {
83953 spinlock_t lock;
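
The notifier.h hunk is one of many in this region that tag ops-like structures __do_const, which grsecurity's constify GCC plugin uses to force instances into read-only memory; the paired __no_const typedef (notifier_block_no_const above) re-opens the type for the few call sites that genuinely need a writable instance. A user-space sketch of the idea, approximating the plugin's effect with explicit const:

#include <stdio.h>

/* Sketch: __do_const marks a struct type so the constify plugin places
 * all instances in .rodata; __no_const opts a typedef back out. Without
 * the plugin, explicit const approximates the same split. */
struct my_ops {
    int (*handler)(int);
};

static int handler_a(int x) { return x + 1; }
static int handler_b(int x) { return x * 2; }

/* what __do_const effectively buys: the instance lives in .rodata */
static const struct my_ops fixed_ops = { .handler = handler_a };

/* what a *_no_const typedef allows: a writable instance where needed */
typedef struct my_ops my_ops_no_const;

int main(void)
{
    my_ops_no_const mutable_ops = fixed_ops; /* writable copy */
    mutable_ops.handler = handler_b;         /* legal: not const */

    printf("%d %d\n", fixed_ops.handler(10), mutable_ops.handler(10)); /* 11 20 */
    /* fixed_ops.handler = handler_b; would not compile */
    return 0;
}
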
83954diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
83955index b2a0f15..4d7da32 100644
83956--- a/include/linux/oprofile.h
83957+++ b/include/linux/oprofile.h
83958@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
83959 int oprofilefs_create_ro_ulong(struct dentry * root,
83960 char const * name, ulong * val);
83961
83962-/** Create a file for read-only access to an atomic_t. */
83963+/** Create a file for read-only access to an atomic_unchecked_t. */
83964 int oprofilefs_create_ro_atomic(struct dentry * root,
83965- char const * name, atomic_t * val);
83966+ char const * name, atomic_unchecked_t * val);
83967
83968 /** create a directory */
83969 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
83970diff --git a/include/linux/padata.h b/include/linux/padata.h
83971index 4386946..f50c615 100644
83972--- a/include/linux/padata.h
83973+++ b/include/linux/padata.h
83974@@ -129,7 +129,7 @@ struct parallel_data {
83975 struct padata_serial_queue __percpu *squeue;
83976 atomic_t reorder_objects;
83977 atomic_t refcnt;
83978- atomic_t seq_nr;
83979+ atomic_unchecked_t seq_nr;
83980 struct padata_cpumask cpumask;
83981 spinlock_t lock ____cacheline_aligned;
83982 unsigned int processed;
83983diff --git a/include/linux/path.h b/include/linux/path.h
83984index d137218..be0c176 100644
83985--- a/include/linux/path.h
83986+++ b/include/linux/path.h
83987@@ -1,13 +1,15 @@
83988 #ifndef _LINUX_PATH_H
83989 #define _LINUX_PATH_H
83990
83991+#include <linux/compiler.h>
83992+
83993 struct dentry;
83994 struct vfsmount;
83995
83996 struct path {
83997 struct vfsmount *mnt;
83998 struct dentry *dentry;
83999-};
84000+} __randomize_layout;
84001
84002 extern void path_get(const struct path *);
84003 extern void path_put(const struct path *);
84004diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84005index 8c78950..0d74ed9 100644
84006--- a/include/linux/pci_hotplug.h
84007+++ b/include/linux/pci_hotplug.h
84008@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84009 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84010 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84011 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84012-};
84013+} __do_const;
84014+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84015
84016 /**
84017 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84018diff --git a/include/linux/percpu.h b/include/linux/percpu.h
84019index caebf2a..4c3ae9d 100644
84020--- a/include/linux/percpu.h
84021+++ b/include/linux/percpu.h
84022@@ -34,7 +34,7 @@
84023 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
84024 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
84025 */
84026-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
84027+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
84028 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
84029
84030 /*
84031diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84032index 664de5a..b3e1bf4 100644
84033--- a/include/linux/perf_event.h
84034+++ b/include/linux/perf_event.h
84035@@ -336,8 +336,8 @@ struct perf_event {
84036
84037 enum perf_event_active_state state;
84038 unsigned int attach_state;
84039- local64_t count;
84040- atomic64_t child_count;
84041+ local64_t count; /* PaX: fix it one day */
84042+ atomic64_unchecked_t child_count;
84043
84044 /*
84045 * These are the total time in nanoseconds that the event
84046@@ -388,8 +388,8 @@ struct perf_event {
84047 * These accumulate total time (in nanoseconds) that children
84048 * events have been enabled and running, respectively.
84049 */
84050- atomic64_t child_total_time_enabled;
84051- atomic64_t child_total_time_running;
84052+ atomic64_unchecked_t child_total_time_enabled;
84053+ atomic64_unchecked_t child_total_time_running;
84054
84055 /*
84056 * Protect attach/detach and child_list:
84057@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84058 entry->ip[entry->nr++] = ip;
84059 }
84060
84061-extern int sysctl_perf_event_paranoid;
84062+extern int sysctl_perf_event_legitimately_concerned;
84063 extern int sysctl_perf_event_mlock;
84064 extern int sysctl_perf_event_sample_rate;
84065 extern int sysctl_perf_cpu_time_max_percent;
84066@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84067 loff_t *ppos);
84068
84069
84070+static inline bool perf_paranoid_any(void)
84071+{
84072+ return sysctl_perf_event_legitimately_concerned > 2;
84073+}
84074+
84075 static inline bool perf_paranoid_tracepoint_raw(void)
84076 {
84077- return sysctl_perf_event_paranoid > -1;
84078+ return sysctl_perf_event_legitimately_concerned > -1;
84079 }
84080
84081 static inline bool perf_paranoid_cpu(void)
84082 {
84083- return sysctl_perf_event_paranoid > 0;
84084+ return sysctl_perf_event_legitimately_concerned > 0;
84085 }
84086
84087 static inline bool perf_paranoid_kernel(void)
84088 {
84089- return sysctl_perf_event_paranoid > 1;
84090+ return sysctl_perf_event_legitimately_concerned > 1;
84091 }
84092
84093 extern void perf_event_init(void);
84094@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
84095 struct device_attribute attr;
84096 u64 id;
84097 const char *event_str;
84098-};
84099+} __do_const;
84100
84101 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84102 static struct perf_pmu_events_attr _var = { \
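
Several hunks around here (netdevice.h, padata.h, perf_event.h, and slab_def.h further down) convert statistics counters from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening the ordinary atomic operations detect signed overflow and neutralize the task; the _unchecked variants opt pure counters out, since wrapping a stats field is harmless. A user-space sketch of the split, assuming GCC's __atomic builtins and two's-complement wrap on the target:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Sketch of atomic_t vs atomic_unchecked_t: the checked increment traps
 * on signed overflow (the kernel would kill the offending task); the
 * unchecked one may wrap, which is fine for statistics. */
typedef struct { volatile int counter; } atomic_checked_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_checked_inc(atomic_checked_t *v)
{
    int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    if (old == INT_MAX) {               /* the add just wrapped */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
}

static void atomic_unchecked_inc(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* may wrap */
}

int main(void)
{
    atomic_unchecked_t stats = { INT_MAX };
    atomic_unchecked_inc(&stats);       /* wraps silently, by design */
    printf("stats counter wrapped to %d\n", stats.counter);

    atomic_checked_t ref = { INT_MAX };
    atomic_checked_inc(&ref);           /* aborts: a real refcount bug */
    return 0;
}
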
84103diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84104index b9cf6c5..5462472 100644
84105--- a/include/linux/pid_namespace.h
84106+++ b/include/linux/pid_namespace.h
84107@@ -45,7 +45,7 @@ struct pid_namespace {
84108 int hide_pid;
84109 int reboot; /* group exit code if this pidns was rebooted */
84110 struct ns_common ns;
84111-};
84112+} __randomize_layout;
84113
84114 extern struct pid_namespace init_pid_ns;
84115
84116diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84117index eb8b8ac..62649e1 100644
84118--- a/include/linux/pipe_fs_i.h
84119+++ b/include/linux/pipe_fs_i.h
84120@@ -47,10 +47,10 @@ struct pipe_inode_info {
84121 struct mutex mutex;
84122 wait_queue_head_t wait;
84123 unsigned int nrbufs, curbuf, buffers;
84124- unsigned int readers;
84125- unsigned int writers;
84126- unsigned int files;
84127- unsigned int waiting_writers;
84128+ atomic_t readers;
84129+ atomic_t writers;
84130+ atomic_t files;
84131+ atomic_t waiting_writers;
84132 unsigned int r_counter;
84133 unsigned int w_counter;
84134 struct page *tmp_page;
84135diff --git a/include/linux/pm.h b/include/linux/pm.h
84136index 8b59763..8a05939 100644
84137--- a/include/linux/pm.h
84138+++ b/include/linux/pm.h
84139@@ -608,6 +608,7 @@ struct dev_pm_domain {
84140 struct dev_pm_ops ops;
84141 void (*detach)(struct device *dev, bool power_off);
84142 };
84143+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84144
84145 /*
84146 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84147diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84148index a9edab2..8bada56 100644
84149--- a/include/linux/pm_domain.h
84150+++ b/include/linux/pm_domain.h
84151@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84152 int (*save_state)(struct device *dev);
84153 int (*restore_state)(struct device *dev);
84154 bool (*active_wakeup)(struct device *dev);
84155-};
84156+} __no_const;
84157
84158 struct gpd_cpuidle_data {
84159 unsigned int saved_exit_latency;
84160- struct cpuidle_state *idle_state;
84161+ cpuidle_state_no_const *idle_state;
84162 };
84163
84164 struct generic_pm_domain {
84165diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84166index 30e84d4..22278b4 100644
84167--- a/include/linux/pm_runtime.h
84168+++ b/include/linux/pm_runtime.h
84169@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84170
84171 static inline void pm_runtime_mark_last_busy(struct device *dev)
84172 {
84173- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84174+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84175 }
84176
84177 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84178diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84179index 195aafc..49a7bc2 100644
84180--- a/include/linux/pnp.h
84181+++ b/include/linux/pnp.h
84182@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84183 struct pnp_fixup {
84184 char id[7];
84185 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84186-};
84187+} __do_const;
84188
84189 /* config parameters */
84190 #define PNP_CONFIG_NORMAL 0x0001
84191diff --git a/include/linux/poison.h b/include/linux/poison.h
84192index 2110a81..13a11bb 100644
84193--- a/include/linux/poison.h
84194+++ b/include/linux/poison.h
84195@@ -19,8 +19,8 @@
84196 * under normal circumstances, used to verify that nobody uses
84197 * non-initialized list entries.
84198 */
84199-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84200-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84201+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84202+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84203
84204 /********** include/linux/timer.h **********/
84205 /*
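
The poison.h hunk relocates LIST_POISON1/2 from low addresses, which user space could potentially map and thereby turn a use-after-list_del into a read of attacker-controlled data, to values near the top of the address space that user space cannot map. What the poisoning itself does, in a user-space sketch:

#include <stdio.h>

/* Sketch of list poisoning: after deletion the stale next/prev fields
 * are set to distinctive values so a use-after-delete faults (and is
 * recognizable in the resulting oops). These are the patch's new
 * constants. */
#define LIST_POISON1 ((void *)(long)0xFFFFFF01)
#define LIST_POISON2 ((void *)(long)0xFFFFFF02)

struct list_head { struct list_head *next, *prev; };

static void list_del_poison(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    entry->next = LIST_POISON1;
    entry->prev = LIST_POISON2;
}

int main(void)
{
    struct list_head a, b, c;
    a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
    c.next = &a; a.prev = &c;               /* 3-node circular list */

    list_del_poison(&b);
    printf("b.next = %p (poison)\n", (void *)b.next);
    /* dereferencing b.next now faults instead of hitting mapped data */
    return 0;
}
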
84206diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84207index d8b187c3..9a9257a 100644
84208--- a/include/linux/power/smartreflex.h
84209+++ b/include/linux/power/smartreflex.h
84210@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84211 int (*notify)(struct omap_sr *sr, u32 status);
84212 u8 notify_flags;
84213 u8 class_type;
84214-};
84215+} __do_const;
84216
84217 /**
84218 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84219diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84220index 4ea1d37..80f4b33 100644
84221--- a/include/linux/ppp-comp.h
84222+++ b/include/linux/ppp-comp.h
84223@@ -84,7 +84,7 @@ struct compressor {
84224 struct module *owner;
84225 /* Extra skb space needed by the compressor algorithm */
84226 unsigned int comp_extra;
84227-};
84228+} __do_const;
84229
84230 /*
84231 * The return value from decompress routine is the length of the
84232diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84233index de83b4e..c4b997d 100644
84234--- a/include/linux/preempt.h
84235+++ b/include/linux/preempt.h
84236@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84237 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84238 #endif
84239
84240+#define raw_preempt_count_add(val) __preempt_count_add(val)
84241+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84242+
84243 #define __preempt_count_inc() __preempt_count_add(1)
84244 #define __preempt_count_dec() __preempt_count_sub(1)
84245
84246 #define preempt_count_inc() preempt_count_add(1)
84247+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84248 #define preempt_count_dec() preempt_count_sub(1)
84249+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84250
84251 #ifdef CONFIG_PREEMPT_COUNT
84252
84253@@ -41,6 +46,12 @@ do { \
84254 barrier(); \
84255 } while (0)
84256
84257+#define raw_preempt_disable() \
84258+do { \
84259+ raw_preempt_count_inc(); \
84260+ barrier(); \
84261+} while (0)
84262+
84263 #define sched_preempt_enable_no_resched() \
84264 do { \
84265 barrier(); \
84266@@ -49,6 +60,12 @@ do { \
84267
84268 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84269
84270+#define raw_preempt_enable_no_resched() \
84271+do { \
84272+ barrier(); \
84273+ raw_preempt_count_dec(); \
84274+} while (0)
84275+
84276 #ifdef CONFIG_PREEMPT
84277 #define preempt_enable() \
84278 do { \
84279@@ -113,8 +130,10 @@ do { \
84280 * region.
84281 */
84282 #define preempt_disable() barrier()
84283+#define raw_preempt_disable() barrier()
84284 #define sched_preempt_enable_no_resched() barrier()
84285 #define preempt_enable_no_resched() barrier()
84286+#define raw_preempt_enable_no_resched() barrier()
84287 #define preempt_enable() barrier()
84288 #define preempt_check_resched() do { } while (0)
84289
84290@@ -128,11 +147,13 @@ do { \
84291 /*
84292 * Modules have no business playing preemption tricks.
84293 */
84294+#ifndef CONFIG_PAX_KERNEXEC
84295 #undef sched_preempt_enable_no_resched
84296 #undef preempt_enable_no_resched
84297 #undef preempt_enable_no_resched_notrace
84298 #undef preempt_check_resched
84299 #endif
84300+#endif
84301
84302 #define preempt_set_need_resched() \
84303 do { \
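
The raw_preempt_* macros added in the preempt.h hunk mirror the ordinary preempt-count discipline while bypassing the debug/tracing wrappers, and under PAX_KERNEXEC the no_resched forms remain visible to modules. The counting itself, in a standalone sketch with barrier() modeled as a compiler barrier (a single-threaded illustration, not the per-CPU kernel mechanism):

#include <stdio.h>

/* Sketch: disable bumps a counter, enable drops it, and preemption is
 * only possible at count zero. The raw_ variants in the patch skip the
 * tracing hooks but keep exactly this counting. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int preempt_count;

#define raw_preempt_disable()           do { preempt_count++; barrier(); } while (0)
#define raw_preempt_enable_no_resched() do { barrier(); preempt_count--; } while (0)

int main(void)
{
    raw_preempt_disable();
    printf("count after disable: %d\n", preempt_count); /* 1 */
    raw_preempt_enable_no_resched();
    printf("count after enable:  %d\n", preempt_count); /* 0 */
    return 0;
}
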
84304diff --git a/include/linux/printk.h b/include/linux/printk.h
84305index 4d5bf57..d94eccf 100644
84306--- a/include/linux/printk.h
84307+++ b/include/linux/printk.h
84308@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84309 #endif
84310
84311 typedef int(*printk_func_t)(const char *fmt, va_list args);
84312+extern int kptr_restrict;
84313
84314 #ifdef CONFIG_PRINTK
84315 asmlinkage __printf(5, 0)
84316@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84317
84318 extern int printk_delay_msec;
84319 extern int dmesg_restrict;
84320-extern int kptr_restrict;
84321
84322 extern void wake_up_klogd(void);
84323
84324diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84325index b97bf2e..f14c92d4 100644
84326--- a/include/linux/proc_fs.h
84327+++ b/include/linux/proc_fs.h
84328@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84329 extern struct proc_dir_entry *proc_symlink(const char *,
84330 struct proc_dir_entry *, const char *);
84331 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84332+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84333 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84334 struct proc_dir_entry *, void *);
84335+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84336+ struct proc_dir_entry *, void *);
84337 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84338 struct proc_dir_entry *);
84339
84340@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84341 return proc_create_data(name, mode, parent, proc_fops, NULL);
84342 }
84343
84344+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84345+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84346+{
84347+#ifdef CONFIG_GRKERNSEC_PROC_USER
84348+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84349+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84350+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84351+#else
84352+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84353+#endif
84354+}
84355+
84356+
84357 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84358 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84359 extern void *PDE_DATA(const struct inode *);
84360@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84361 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84362 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84363 struct proc_dir_entry *parent) {return NULL;}
84364+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84365+ struct proc_dir_entry *parent) { return NULL; }
84366 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84367 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84368+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84369+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84370 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84371 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84372 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84373@@ -79,7 +99,7 @@ struct net;
84374 static inline struct proc_dir_entry *proc_net_mkdir(
84375 struct net *net, const char *name, struct proc_dir_entry *parent)
84376 {
84377- return proc_mkdir_data(name, 0, parent, net);
84378+ return proc_mkdir_data_restrict(name, 0, parent, net);
84379 }
84380
84381 #endif /* _LINUX_PROC_FS_H */
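
proc_create_grsec() above tightens the requested mode of a /proc entry according to the GRKERNSEC_PROC_USER / GRKERNSEC_PROC_USERGROUP options before delegating to proc_create_data(). The mode policy in isolation, as a user-space sketch with POSIX mode bits standing in for the kernel's and the config modeled as an ordinary #define:

#include <stdio.h>
#include <sys/stat.h>

/* Sketch of the proc_create_grsec mode policy: PROC_USER clamps to
 * root-only read, PROC_USERGROUP to root plus one group, otherwise the
 * caller's mode is kept. */
#define CONFIG_GRKERNSEC_PROC_USERGROUP 1

static mode_t restrict_proc_mode(mode_t requested)
{
#if defined(CONFIG_GRKERNSEC_PROC_USER)
    (void)requested;
    return S_IRUSR;                  /* 0400: owner read only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
    (void)requested;
    return S_IRUSR | S_IRGRP;        /* 0440: owner + group read */
#else
    return requested;                /* unrestricted kernels keep it */
#endif
}

int main(void)
{
    printf("requested 0444 -> granted %04o\n",
           (unsigned)restrict_proc_mode(S_IRUSR | S_IRGRP | S_IROTH));
    return 0;
}
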
84382diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84383index 42dfc61..8113a99 100644
84384--- a/include/linux/proc_ns.h
84385+++ b/include/linux/proc_ns.h
84386@@ -16,7 +16,7 @@ struct proc_ns_operations {
84387 struct ns_common *(*get)(struct task_struct *task);
84388 void (*put)(struct ns_common *ns);
84389 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84390-};
84391+} __do_const __randomize_layout;
84392
84393 extern const struct proc_ns_operations netns_operations;
84394 extern const struct proc_ns_operations utsns_operations;
84395diff --git a/include/linux/quota.h b/include/linux/quota.h
84396index b86df49..8002997 100644
84397--- a/include/linux/quota.h
84398+++ b/include/linux/quota.h
84399@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84400
84401 extern bool qid_eq(struct kqid left, struct kqid right);
84402 extern bool qid_lt(struct kqid left, struct kqid right);
84403-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84404+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84405 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84406 extern bool qid_valid(struct kqid qid);
84407
84408diff --git a/include/linux/random.h b/include/linux/random.h
84409index b05856e..0a9f14e 100644
84410--- a/include/linux/random.h
84411+++ b/include/linux/random.h
84412@@ -9,9 +9,19 @@
84413 #include <uapi/linux/random.h>
84414
84415 extern void add_device_randomness(const void *, unsigned int);
84416+
84417+static inline void add_latent_entropy(void)
84418+{
84419+
84420+#ifdef LATENT_ENTROPY_PLUGIN
84421+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84422+#endif
84423+
84424+}
84425+
84426 extern void add_input_randomness(unsigned int type, unsigned int code,
84427- unsigned int value);
84428-extern void add_interrupt_randomness(int irq, int irq_flags);
84429+ unsigned int value) __latent_entropy;
84430+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84431
84432 extern void get_random_bytes(void *buf, int nbytes);
84433 extern void get_random_bytes_arch(void *buf, int nbytes);
84434@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84435 extern const struct file_operations random_fops, urandom_fops;
84436 #endif
84437
84438-unsigned int get_random_int(void);
84439+unsigned int __intentional_overflow(-1) get_random_int(void);
84440 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84441
84442-u32 prandom_u32(void);
84443+u32 prandom_u32(void) __intentional_overflow(-1);
84444 void prandom_bytes(void *buf, size_t nbytes);
84445 void prandom_seed(u32 seed);
84446 void prandom_reseed_late(void);
84447@@ -37,6 +47,11 @@ struct rnd_state {
84448 u32 prandom_u32_state(struct rnd_state *state);
84449 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84450
84451+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84452+{
84453+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84454+}
84455+
84456 /**
84457 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84458 * @ep_ro: right open interval endpoint
84459@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84460 *
84461 * Returns: pseudo-random number in interval [0, ep_ro)
84462 */
84463-static inline u32 prandom_u32_max(u32 ep_ro)
84464+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84465 {
84466 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84467 }
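
pax_get_random_long() in the random.h hunk composes a full-width random long from two 32-bit draws; the sizeof(long) > 4 guard makes the high-half shift a no-op on 32-bit targets. The same expression in a standalone sketch, with rand() standing in for the kernel PRNG and a 64-bit long assumed (on 32-bit builds the dead shift arm may draw a compiler warning):

#include <stdio.h>
#include <stdlib.h>

/* Sketch: one 32-bit draw fills the low half, a second draw shifted up
 * fills the high half on LP64 targets. */
static unsigned int prng_u32(void)
{
    return ((unsigned int)rand() << 16) ^ (unsigned int)rand();
}

static unsigned long get_random_long(void)
{
    return prng_u32() +
           (sizeof(long) > 4 ? (unsigned long)prng_u32() << 32 : 0);
}

int main(void)
{
    srand(1);
    printf("%lx\n", get_random_long());
    printf("%lx\n", get_random_long());
    return 0;
}
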
84468diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84469index 378c5ee..aa84a47 100644
84470--- a/include/linux/rbtree_augmented.h
84471+++ b/include/linux/rbtree_augmented.h
84472@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84473 old->rbaugmented = rbcompute(old); \
84474 } \
84475 rbstatic const struct rb_augment_callbacks rbname = { \
84476- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84477+ .propagate = rbname ## _propagate, \
84478+ .copy = rbname ## _copy, \
84479+ .rotate = rbname ## _rotate \
84480 };
84481
84482
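
The rb_augment_callbacks hunk swaps positional initializers for designated ones. That matters once struct layouts can change underneath an initializer (as with the constify plugin or __randomize_layout): positional initializers silently bind to the wrong fields after reordering, while .field = value stays correct. A small illustration:

#include <stdio.h>

/* Sketch: designated initializers are order-independent, so they
 * survive field reordering that a layout-randomizing plugin performs. */
struct callbacks {
    void (*propagate)(int);
    void (*copy)(int);
    void (*rotate)(int);
};

static void my_propagate(int n) { printf("propagate %d\n", n); }
static void my_copy(int n)      { printf("copy %d\n", n); }
static void my_rotate(int n)    { printf("rotate %d\n", n); }

static const struct callbacks cbs = {
    .propagate = my_propagate,
    .copy      = my_copy,
    .rotate    = my_rotate,
};

int main(void)
{
    cbs.propagate(1);
    cbs.copy(2);
    cbs.rotate(3);
    return 0;
}
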
84483diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84484index 529bc94..82ce778 100644
84485--- a/include/linux/rculist.h
84486+++ b/include/linux/rculist.h
84487@@ -29,8 +29,8 @@
84488 */
84489 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84490 {
84491- ACCESS_ONCE(list->next) = list;
84492- ACCESS_ONCE(list->prev) = list;
84493+ ACCESS_ONCE_RW(list->next) = list;
84494+ ACCESS_ONCE_RW(list->prev) = list;
84495 }
84496
84497 /*
84498@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84499 struct list_head *prev, struct list_head *next);
84500 #endif
84501
84502+void __pax_list_add_rcu(struct list_head *new,
84503+ struct list_head *prev, struct list_head *next);
84504+
84505 /**
84506 * list_add_rcu - add a new entry to rcu-protected list
84507 * @new: new entry to be added
84508@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84509 __list_add_rcu(new, head, head->next);
84510 }
84511
84512+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84513+{
84514+ __pax_list_add_rcu(new, head, head->next);
84515+}
84516+
84517 /**
84518 * list_add_tail_rcu - add a new entry to rcu-protected list
84519 * @new: new entry to be added
84520@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84521 __list_add_rcu(new, head->prev, head);
84522 }
84523
84524+static inline void pax_list_add_tail_rcu(struct list_head *new,
84525+ struct list_head *head)
84526+{
84527+ __pax_list_add_rcu(new, head->prev, head);
84528+}
84529+
84530 /**
84531 * list_del_rcu - deletes entry from list without re-initialization
84532 * @entry: the element to delete from the list.
84533@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84534 entry->prev = LIST_POISON2;
84535 }
84536
84537+extern void pax_list_del_rcu(struct list_head *entry);
84538+
84539 /**
84540 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84541 * @n: the element to delete from the hash list.
84542diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84543index ed4f593..8a51501 100644
84544--- a/include/linux/rcupdate.h
84545+++ b/include/linux/rcupdate.h
84546@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84547 #define rcu_note_voluntary_context_switch(t) \
84548 do { \
84549 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84550- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84551+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84552 } while (0)
84553 #else /* #ifdef CONFIG_TASKS_RCU */
84554 #define TASKS_RCU(x) do { } while (0)
84555diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84556index 67fc8fc..a90f7d8 100644
84557--- a/include/linux/reboot.h
84558+++ b/include/linux/reboot.h
84559@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84560 */
84561
84562 extern void migrate_to_reboot_cpu(void);
84563-extern void machine_restart(char *cmd);
84564-extern void machine_halt(void);
84565-extern void machine_power_off(void);
84566+extern void machine_restart(char *cmd) __noreturn;
84567+extern void machine_halt(void) __noreturn;
84568+extern void machine_power_off(void) __noreturn;
84569
84570 extern void machine_shutdown(void);
84571 struct pt_regs;
84572@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84573 */
84574
84575 extern void kernel_restart_prepare(char *cmd);
84576-extern void kernel_restart(char *cmd);
84577-extern void kernel_halt(void);
84578-extern void kernel_power_off(void);
84579+extern void kernel_restart(char *cmd) __noreturn;
84580+extern void kernel_halt(void) __noreturn;
84581+extern void kernel_power_off(void) __noreturn;
84582
84583 extern int C_A_D; /* for sysctl */
84584 void ctrl_alt_del(void);
84585@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84586 * Emergency restart, callable from an interrupt handler.
84587 */
84588
84589-extern void emergency_restart(void);
84590+extern void emergency_restart(void) __noreturn;
84591 #include <asm/emergency-restart.h>
84592
84593 #endif /* _LINUX_REBOOT_H */
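
The reboot.h hunk annotates the halt/restart entry points __noreturn, telling the compiler control never comes back, which improves both diagnostics and codegen at call sites. A sketch of the effect using _Noreturn, the C11 equivalent of the GCC attribute the kernel macro expands to:

#include <stdio.h>
#include <stdlib.h>

/* Sketch: the compiler knows this function never returns, so anything
 * after a call to it is provably unreachable. */
static _Noreturn void machine_halt_sketch(void)
{
    fprintf(stderr, "halting\n");
    exit(0);
}

int main(void)
{
    machine_halt_sketch();
    /* code here would be flagged as unreachable */
}
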
84594diff --git a/include/linux/regset.h b/include/linux/regset.h
84595index 8e0c9fe..ac4d221 100644
84596--- a/include/linux/regset.h
84597+++ b/include/linux/regset.h
84598@@ -161,7 +161,8 @@ struct user_regset {
84599 unsigned int align;
84600 unsigned int bias;
84601 unsigned int core_note_type;
84602-};
84603+} __do_const;
84604+typedef struct user_regset __no_const user_regset_no_const;
84605
84606 /**
84607 * struct user_regset_view - available regsets
84608diff --git a/include/linux/relay.h b/include/linux/relay.h
84609index d7c8359..818daf5 100644
84610--- a/include/linux/relay.h
84611+++ b/include/linux/relay.h
84612@@ -157,7 +157,7 @@ struct rchan_callbacks
84613 * The callback should return 0 if successful, negative if not.
84614 */
84615 int (*remove_buf_file)(struct dentry *dentry);
84616-};
84617+} __no_const;
84618
84619 /*
84620 * CONFIG_RELAY kernel API, kernel/relay.c
84621diff --git a/include/linux/rio.h b/include/linux/rio.h
84622index 6bda06f..bf39a9b 100644
84623--- a/include/linux/rio.h
84624+++ b/include/linux/rio.h
84625@@ -358,7 +358,7 @@ struct rio_ops {
84626 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84627 u64 rstart, u32 size, u32 flags);
84628 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84629-};
84630+} __no_const;
84631
84632 #define RIO_RESOURCE_MEM 0x00000100
84633 #define RIO_RESOURCE_DOORBELL 0x00000200
84634diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84635index d9d7e7e..86f47ac 100644
84636--- a/include/linux/rmap.h
84637+++ b/include/linux/rmap.h
84638@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84639 void anon_vma_init(void); /* create anon_vma_cachep */
84640 int anon_vma_prepare(struct vm_area_struct *);
84641 void unlink_anon_vmas(struct vm_area_struct *);
84642-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84643-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84644+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84645+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84646
84647 static inline void anon_vma_merge(struct vm_area_struct *vma,
84648 struct vm_area_struct *next)
84649diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84650index ed8f9e7..999bc96 100644
84651--- a/include/linux/scatterlist.h
84652+++ b/include/linux/scatterlist.h
84653@@ -1,6 +1,7 @@
84654 #ifndef _LINUX_SCATTERLIST_H
84655 #define _LINUX_SCATTERLIST_H
84656
84657+#include <linux/sched.h>
84658 #include <linux/string.h>
84659 #include <linux/bug.h>
84660 #include <linux/mm.h>
84661@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84662 #ifdef CONFIG_DEBUG_SG
84663 BUG_ON(!virt_addr_valid(buf));
84664 #endif
84665+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84666+ if (object_starts_on_stack(buf)) {
84667+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84668+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84669+ } else
84670+#endif
84671 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84672 }
84673
84674diff --git a/include/linux/sched.h b/include/linux/sched.h
84675index 8db31ef..0af1f81 100644
84676--- a/include/linux/sched.h
84677+++ b/include/linux/sched.h
84678@@ -133,6 +133,7 @@ struct fs_struct;
84679 struct perf_event_context;
84680 struct blk_plug;
84681 struct filename;
84682+struct linux_binprm;
84683
84684 #define VMACACHE_BITS 2
84685 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84686@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84687 extern int in_sched_functions(unsigned long addr);
84688
84689 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84690-extern signed long schedule_timeout(signed long timeout);
84691+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84692 extern signed long schedule_timeout_interruptible(signed long timeout);
84693 extern signed long schedule_timeout_killable(signed long timeout);
84694 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84695@@ -426,6 +427,19 @@ struct nsproxy;
84696 struct user_namespace;
84697
84698 #ifdef CONFIG_MMU
84699+
84700+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84701+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84702+#else
84703+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84704+{
84705+ return 0;
84706+}
84707+#endif
84708+
84709+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84710+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84711+
84712 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84713 extern unsigned long
84714 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84715@@ -724,6 +738,17 @@ struct signal_struct {
84716 #ifdef CONFIG_TASKSTATS
84717 struct taskstats *stats;
84718 #endif
84719+
84720+#ifdef CONFIG_GRKERNSEC
84721+ u32 curr_ip;
84722+ u32 saved_ip;
84723+ u32 gr_saddr;
84724+ u32 gr_daddr;
84725+ u16 gr_sport;
84726+ u16 gr_dport;
84727+ u8 used_accept:1;
84728+#endif
84729+
84730 #ifdef CONFIG_AUDIT
84731 unsigned audit_tty;
84732 unsigned audit_tty_log_passwd;
84733@@ -750,7 +775,7 @@ struct signal_struct {
84734 struct mutex cred_guard_mutex; /* guard against foreign influences on
84735 * credential calculations
84736 * (notably. ptrace) */
84737-};
84738+} __randomize_layout;
84739
84740 /*
84741 * Bits in flags field of signal_struct.
84742@@ -803,6 +828,14 @@ struct user_struct {
84743 struct key *session_keyring; /* UID's default session keyring */
84744 #endif
84745
84746+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84747+ unsigned char kernel_banned;
84748+#endif
84749+#ifdef CONFIG_GRKERNSEC_BRUTE
84750+ unsigned char suid_banned;
84751+ unsigned long suid_ban_expires;
84752+#endif
84753+
84754 /* Hash table maintenance information */
84755 struct hlist_node uidhash_node;
84756 kuid_t uid;
84757@@ -810,7 +843,7 @@ struct user_struct {
84758 #ifdef CONFIG_PERF_EVENTS
84759 atomic_long_t locked_vm;
84760 #endif
84761-};
84762+} __randomize_layout;
84763
84764 extern int uids_sysfs_init(void);
84765
84766@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84767 struct task_struct {
84768 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84769 void *stack;
84770+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84771+ void *lowmem_stack;
84772+#endif
84773 atomic_t usage;
84774 unsigned int flags; /* per process flags, defined below */
84775 unsigned int ptrace;
84776@@ -1405,8 +1441,8 @@ struct task_struct {
84777 struct list_head thread_node;
84778
84779 struct completion *vfork_done; /* for vfork() */
84780- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84781- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84782+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84783+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84784
84785 cputime_t utime, stime, utimescaled, stimescaled;
84786 cputime_t gtime;
84787@@ -1431,11 +1467,6 @@ struct task_struct {
84788 struct task_cputime cputime_expires;
84789 struct list_head cpu_timers[3];
84790
84791-/* process credentials */
84792- const struct cred __rcu *real_cred; /* objective and real subjective task
84793- * credentials (COW) */
84794- const struct cred __rcu *cred; /* effective (overridable) subjective task
84795- * credentials (COW) */
84796 char comm[TASK_COMM_LEN]; /* executable name excluding path
84797 - access with [gs]et_task_comm (which lock
84798 it with task_lock())
84799@@ -1453,6 +1484,10 @@ struct task_struct {
84800 #endif
84801 /* CPU-specific state of this task */
84802 struct thread_struct thread;
84803+/* thread_info moved to task_struct */
84804+#ifdef CONFIG_X86
84805+ struct thread_info tinfo;
84806+#endif
84807 /* filesystem information */
84808 struct fs_struct *fs;
84809 /* open file information */
84810@@ -1527,6 +1562,10 @@ struct task_struct {
84811 gfp_t lockdep_reclaim_gfp;
84812 #endif
84813
84814+/* process credentials */
84815+ const struct cred __rcu *real_cred; /* objective and real subjective task
84816+ * credentials (COW) */
84817+
84818 /* journalling filesystem info */
84819 void *journal_info;
84820
84821@@ -1565,6 +1604,10 @@ struct task_struct {
84822 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84823 struct list_head cg_list;
84824 #endif
84825+
84826+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84827+ * credentials (COW) */
84828+
84829 #ifdef CONFIG_FUTEX
84830 struct robust_list_head __user *robust_list;
84831 #ifdef CONFIG_COMPAT
84832@@ -1673,7 +1716,7 @@ struct task_struct {
84833 * Number of functions that haven't been traced
84834 * because of depth overrun.
84835 */
84836- atomic_t trace_overrun;
84837+ atomic_unchecked_t trace_overrun;
84838 /* Pause for the tracing */
84839 atomic_t tracing_graph_pause;
84840 #endif
84841@@ -1701,7 +1744,78 @@ struct task_struct {
84842 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84843 unsigned long task_state_change;
84844 #endif
84845-};
84846+
84847+#ifdef CONFIG_GRKERNSEC
84848+ /* grsecurity */
84849+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84850+ u64 exec_id;
84851+#endif
84852+#ifdef CONFIG_GRKERNSEC_SETXID
84853+ const struct cred *delayed_cred;
84854+#endif
84855+ struct dentry *gr_chroot_dentry;
84856+ struct acl_subject_label *acl;
84857+ struct acl_subject_label *tmpacl;
84858+ struct acl_role_label *role;
84859+ struct file *exec_file;
84860+ unsigned long brute_expires;
84861+ u16 acl_role_id;
84862+ u8 inherited;
84863+ /* is this the task that authenticated to the special role */
84864+ u8 acl_sp_role;
84865+ u8 is_writable;
84866+ u8 brute;
84867+ u8 gr_is_chrooted;
84868+#endif
84869+
84870+} __randomize_layout;
84871+
84872+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84873+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84874+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84875+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84876+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84877+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84878+
84879+#ifdef CONFIG_PAX_SOFTMODE
84880+extern int pax_softmode;
84881+#endif
84882+
84883+extern int pax_check_flags(unsigned long *);
84884+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84885+
84886+/* if tsk != current then task_lock must be held on it */
84887+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84888+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84889+{
84890+ if (likely(tsk->mm))
84891+ return tsk->mm->pax_flags;
84892+ else
84893+ return 0UL;
84894+}
84895+
84896+/* if tsk != current then task_lock must be held on it */
84897+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84898+{
84899+ if (likely(tsk->mm)) {
84900+ tsk->mm->pax_flags = flags;
84901+ return 0;
84902+ }
84903+ return -EINVAL;
84904+}
84905+#endif
84906+
84907+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84908+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84909+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84910+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84911+#endif
84912+
84913+struct path;
84914+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84915+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84916+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84917+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84918
84919 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84920 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84921@@ -1783,7 +1897,7 @@ struct pid_namespace;
84922 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84923 struct pid_namespace *ns);
84924
84925-static inline pid_t task_pid_nr(struct task_struct *tsk)
84926+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84927 {
84928 return tsk->pid;
84929 }
84930@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
84931
84932 extern void sched_clock_init(void);
84933
84934+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84935+static inline void populate_stack(void)
84936+{
84937+ struct task_struct *curtask = current;
84938+ int c;
84939+ int *ptr = curtask->stack;
84940+ int *end = curtask->stack + THREAD_SIZE;
84941+
84942+ while (ptr < end) {
84943+ c = *(volatile int *)ptr;
84944+ ptr += PAGE_SIZE/sizeof(int);
84945+ }
84946+}
84947+#else
84948+static inline void populate_stack(void)
84949+{
84950+}
84951+#endif
84952+
84953 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
84954 static inline void sched_clock_tick(void)
84955 {
84956@@ -2283,7 +2416,9 @@ void yield(void);
84957 extern struct exec_domain default_exec_domain;
84958
84959 union thread_union {
84960+#ifndef CONFIG_X86
84961 struct thread_info thread_info;
84962+#endif
84963 unsigned long stack[THREAD_SIZE/sizeof(long)];
84964 };
84965
84966@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
84967 */
84968
84969 extern struct task_struct *find_task_by_vpid(pid_t nr);
84970+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
84971 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
84972 struct pid_namespace *ns);
84973
84974@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
84975 extern void exit_itimers(struct signal_struct *);
84976 extern void flush_itimer_signals(void);
84977
84978-extern void do_group_exit(int);
84979+extern __noreturn void do_group_exit(int);
84980
84981 extern int do_execve(struct filename *,
84982 const char __user * const __user *,
84983@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
84984 #define task_stack_end_corrupted(task) \
84985 (*(end_of_stack(task)) != STACK_END_MAGIC)
84986
84987-static inline int object_is_on_stack(void *obj)
84988+static inline int object_starts_on_stack(const void *obj)
84989 {
84990- void *stack = task_stack_page(current);
84991+ const void *stack = task_stack_page(current);
84992
84993 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
84994 }
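
populate_stack(), added to sched.h above, walks the thread stack touching one int per page so every page is faulted in ahead of time; with vmalloc-backed stacks a lazy fault deep in an atomic path would be unwelcome. A standalone sketch of the same probe loop, with an ordinary array standing in for the thread stack:

#include <stdio.h>

#define THREAD_SIZE 16384
#define PAGE_SIZE   4096

static int fake_stack[THREAD_SIZE / sizeof(int)];

/* One volatile read per page: the volatile access keeps the compiler
 * from deleting the "useless" loads, and each load faults its page in. */
static void populate_stack(void *stack)
{
    int *ptr = stack;
    int *end = (int *)((char *)stack + THREAD_SIZE);
    int c;

    while (ptr < end) {
        c = *(volatile int *)ptr;        /* touch the page */
        ptr += PAGE_SIZE / sizeof(int);  /* advance one page */
    }
    (void)c;
}

int main(void)
{
    populate_stack(fake_stack);
    printf("probed %d pages\n", THREAD_SIZE / PAGE_SIZE);
    return 0;
}
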
84995diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
84996index 596a0e0..bea77ec 100644
84997--- a/include/linux/sched/sysctl.h
84998+++ b/include/linux/sched/sysctl.h
84999@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85000 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85001
85002 extern int sysctl_max_map_count;
85003+extern unsigned long sysctl_heap_stack_gap;
85004
85005 extern unsigned int sysctl_sched_latency;
85006 extern unsigned int sysctl_sched_min_granularity;
85007diff --git a/include/linux/security.h b/include/linux/security.h
85008index ba96471..74fb3f6 100644
85009--- a/include/linux/security.h
85010+++ b/include/linux/security.h
85011@@ -27,6 +27,7 @@
85012 #include <linux/slab.h>
85013 #include <linux/err.h>
85014 #include <linux/string.h>
85015+#include <linux/grsecurity.h>
85016
85017 struct linux_binprm;
85018 struct cred;
85019@@ -116,8 +117,6 @@ struct seq_file;
85020
85021 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85022
85023-void reset_security_ops(void);
85024-
85025 #ifdef CONFIG_MMU
85026 extern unsigned long mmap_min_addr;
85027 extern unsigned long dac_mmap_min_addr;
85028@@ -1729,7 +1728,7 @@ struct security_operations {
85029 struct audit_context *actx);
85030 void (*audit_rule_free) (void *lsmrule);
85031 #endif /* CONFIG_AUDIT */
85032-};
85033+} __randomize_layout;
85034
85035 /* prototypes */
85036 extern int security_init(void);
85037diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85038index dc368b8..e895209 100644
85039--- a/include/linux/semaphore.h
85040+++ b/include/linux/semaphore.h
85041@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85042 }
85043
85044 extern void down(struct semaphore *sem);
85045-extern int __must_check down_interruptible(struct semaphore *sem);
85046+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85047 extern int __must_check down_killable(struct semaphore *sem);
85048 extern int __must_check down_trylock(struct semaphore *sem);
85049 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85050diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85051index cf6a9da..bd86b1f 100644
85052--- a/include/linux/seq_file.h
85053+++ b/include/linux/seq_file.h
85054@@ -27,6 +27,9 @@ struct seq_file {
85055 struct mutex lock;
85056 const struct seq_operations *op;
85057 int poll_event;
85058+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85059+ u64 exec_id;
85060+#endif
85061 #ifdef CONFIG_USER_NS
85062 struct user_namespace *user_ns;
85063 #endif
85064@@ -39,6 +42,7 @@ struct seq_operations {
85065 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85066 int (*show) (struct seq_file *m, void *v);
85067 };
85068+typedef struct seq_operations __no_const seq_operations_no_const;
85069
85070 #define SEQ_SKIP 1
85071
85072@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
85073
85074 char *mangle_path(char *s, const char *p, const char *esc);
85075 int seq_open(struct file *, const struct seq_operations *);
85076+int seq_open_restrict(struct file *, const struct seq_operations *);
85077 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85078 loff_t seq_lseek(struct file *, loff_t, int);
85079 int seq_release(struct inode *, struct file *);
85080@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85081 }
85082
85083 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85084+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85085 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85086 int single_release(struct inode *, struct file *);
85087 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85088diff --git a/include/linux/shm.h b/include/linux/shm.h
85089index 6fb8016..ab4465e 100644
85090--- a/include/linux/shm.h
85091+++ b/include/linux/shm.h
85092@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85093 /* The task created the shm object. NULL if the task is dead. */
85094 struct task_struct *shm_creator;
85095 struct list_head shm_clist; /* list by creator */
85096+#ifdef CONFIG_GRKERNSEC
85097+ u64 shm_createtime;
85098+ pid_t shm_lapid;
85099+#endif
85100 };
85101
85102 /* shm_mode upper byte flags */
85103diff --git a/include/linux/signal.h b/include/linux/signal.h
85104index ab1e039..ad4229e 100644
85105--- a/include/linux/signal.h
85106+++ b/include/linux/signal.h
85107@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85108 * know it'll be handled, so that they don't get converted to
85109 * SIGKILL or just silently dropped.
85110 */
85111- kernel_sigaction(sig, (__force __sighandler_t)2);
85112+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85113 }
85114
85115 static inline void disallow_signal(int sig)
85116diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85117index 85ab7d7..eb1585a 100644
85118--- a/include/linux/skbuff.h
85119+++ b/include/linux/skbuff.h
85120@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85121 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85122 int node);
85123 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85124-static inline struct sk_buff *alloc_skb(unsigned int size,
85125+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85126 gfp_t priority)
85127 {
85128 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85129@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85130 return skb->inner_transport_header - skb->inner_network_header;
85131 }
85132
85133-static inline int skb_network_offset(const struct sk_buff *skb)
85134+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85135 {
85136 return skb_network_header(skb) - skb->data;
85137 }
85138@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85139 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85140 */
85141 #ifndef NET_SKB_PAD
85142-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85143+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85144 #endif
85145
85146 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85147@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85148 int *err);
85149 unsigned int datagram_poll(struct file *file, struct socket *sock,
85150 struct poll_table_struct *wait);
85151-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85152+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85153 struct iov_iter *to, int size);
85154-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85155+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85156 struct msghdr *msg, int size)
85157 {
85158 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85159@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85160 nf_bridge_put(skb->nf_bridge);
85161 skb->nf_bridge = NULL;
85162 #endif
85163+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85164+ skb->nf_trace = 0;
85165+#endif
85166 }
85167
85168 static inline void nf_reset_trace(struct sk_buff *skb)
85169diff --git a/include/linux/slab.h b/include/linux/slab.h
85170index 9a139b6..aab37b4 100644
85171--- a/include/linux/slab.h
85172+++ b/include/linux/slab.h
85173@@ -14,15 +14,29 @@
85174 #include <linux/gfp.h>
85175 #include <linux/types.h>
85176 #include <linux/workqueue.h>
85177-
85178+#include <linux/err.h>
85179
85180 /*
85181 * Flags to pass to kmem_cache_create().
85182 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85183 */
85184 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85185+
85186+#ifdef CONFIG_PAX_USERCOPY_SLABS
85187+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85188+#else
85189+#define SLAB_USERCOPY 0x00000000UL
85190+#endif
85191+
85192 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85193 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85194+
85195+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85196+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85197+#else
85198+#define SLAB_NO_SANITIZE 0x00000000UL
85199+#endif
85200+
85201 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85202 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85203 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85204@@ -98,10 +112,13 @@
85205 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85206 * Both make kfree a no-op.
85207 */
85208-#define ZERO_SIZE_PTR ((void *)16)
85209+#define ZERO_SIZE_PTR \
85210+({ \
85211+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85212+ (void *)(-MAX_ERRNO-1L); \
85213+})
85214
85215-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85216- (unsigned long)ZERO_SIZE_PTR)
85217+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85218
85219 #include <linux/kmemleak.h>
85220
85221@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85222 void kfree(const void *);
85223 void kzfree(const void *);
85224 size_t ksize(const void *);
85225+const char *check_heap_object(const void *ptr, unsigned long n);
85226+bool is_usercopy_object(const void *ptr);
85227
85228 /*
85229 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85230@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85231 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85232 #endif
85233
85234+#ifdef CONFIG_PAX_USERCOPY_SLABS
85235+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85236+#endif
85237+
85238 /*
85239 * Figure out which kmalloc slab an allocation of a certain size
85240 * belongs to.
85241@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85242 * 2 = 120 .. 192 bytes
85243 * n = 2^(n-1) .. 2^n -1
85244 */
85245-static __always_inline int kmalloc_index(size_t size)
85246+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85247 {
85248 if (!size)
85249 return 0;
85250@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85251 }
85252 #endif /* !CONFIG_SLOB */
85253
85254-void *__kmalloc(size_t size, gfp_t flags);
85255+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85256 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85257
85258 #ifdef CONFIG_NUMA
85259-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85260+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85261 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85262 #else
85263-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85264+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85265 {
85266 return __kmalloc(size, flags);
85267 }
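
Moving ZERO_SIZE_PTR from 16 up to just below the ERR_PTR range lets a single unsigned comparison classify NULL, zero-size allocations, and everything at or above ZERO_SIZE_PTR together, while the BUILD_BUG_ON asserts that MAX_ERRNO still fits inside a page. A userspace sketch of the arithmetic, assuming MAX_ERRNO is 4095 as in include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
        int obj = 0;

        /* NULL wraps to ULONG_MAX under the "- 1", so one compare
         * covers both ends: prints 1 1 0 */
        printf("%d %d %d\n", ZERO_OR_NULL_PTR(NULL),
               ZERO_OR_NULL_PTR(ZERO_SIZE_PTR), ZERO_OR_NULL_PTR(&obj));
        return 0;
}
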
85268diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85269index b869d16..1453c73 100644
85270--- a/include/linux/slab_def.h
85271+++ b/include/linux/slab_def.h
85272@@ -40,7 +40,7 @@ struct kmem_cache {
85273 /* 4) cache creation/removal */
85274 const char *name;
85275 struct list_head list;
85276- int refcount;
85277+ atomic_t refcount;
85278 int object_size;
85279 int align;
85280
85281@@ -56,10 +56,14 @@ struct kmem_cache {
85282 unsigned long node_allocs;
85283 unsigned long node_frees;
85284 unsigned long node_overflow;
85285- atomic_t allochit;
85286- atomic_t allocmiss;
85287- atomic_t freehit;
85288- atomic_t freemiss;
85289+ atomic_unchecked_t allochit;
85290+ atomic_unchecked_t allocmiss;
85291+ atomic_unchecked_t freehit;
85292+ atomic_unchecked_t freemiss;
85293+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85294+ atomic_unchecked_t sanitized;
85295+ atomic_unchecked_t not_sanitized;
85296+#endif
85297
85298 /*
85299 * If debugging is enabled, then the allocator can add additional
85300diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85301index d82abd4..408c3a0 100644
85302--- a/include/linux/slub_def.h
85303+++ b/include/linux/slub_def.h
85304@@ -74,7 +74,7 @@ struct kmem_cache {
85305 struct kmem_cache_order_objects max;
85306 struct kmem_cache_order_objects min;
85307 gfp_t allocflags; /* gfp flags to use on each alloc */
85308- int refcount; /* Refcount for slab cache destroy */
85309+ atomic_t refcount; /* Refcount for slab cache destroy */
85310 void (*ctor)(void *);
85311 int inuse; /* Offset to metadata */
85312 int align; /* Alignment */
85313diff --git a/include/linux/smp.h b/include/linux/smp.h
85314index 93dff5f..933c561 100644
85315--- a/include/linux/smp.h
85316+++ b/include/linux/smp.h
85317@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85318 #endif
85319
85320 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85321+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85322 #define put_cpu() preempt_enable()
85323+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85324
85325 /*
85326 * Callback to arch code if there's nosmp or maxcpus=0 on the
85327diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85328index 46cca4c..3323536 100644
85329--- a/include/linux/sock_diag.h
85330+++ b/include/linux/sock_diag.h
85331@@ -11,7 +11,7 @@ struct sock;
85332 struct sock_diag_handler {
85333 __u8 family;
85334 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85335-};
85336+} __do_const;
85337
85338 int sock_diag_register(const struct sock_diag_handler *h);
85339 void sock_diag_unregister(const struct sock_diag_handler *h);
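
This is the first of many __do_const annotations in these headers: the constify gcc plugin treats every instance of the marked structure type as const and places it in read-only memory, so ops/handler tables cannot be overwritten at runtime, while __no_const (and the *_no_const typedefs introduced below for ctl_table, attribute, and friends) opts a type back out where a writable instance is unavoidable. A sketch of the expected macro definitions, assuming they follow the compiler.h changes made elsewhere in this patch:

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))
#define __no_const __attribute__((no_const))
#else
#define __do_const
#define __no_const
#endif
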
85340diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85341index 680f9a3..f13aeb0 100644
85342--- a/include/linux/sonet.h
85343+++ b/include/linux/sonet.h
85344@@ -7,7 +7,7 @@
85345 #include <uapi/linux/sonet.h>
85346
85347 struct k_sonet_stats {
85348-#define __HANDLE_ITEM(i) atomic_t i
85349+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85350 __SONET_ITEMS
85351 #undef __HANDLE_ITEM
85352 };
85353diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85354index 07d8e53..dc934c9 100644
85355--- a/include/linux/sunrpc/addr.h
85356+++ b/include/linux/sunrpc/addr.h
85357@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85358 {
85359 switch (sap->sa_family) {
85360 case AF_INET:
85361- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85362+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85363 case AF_INET6:
85364- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85365+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85366 }
85367 return 0;
85368 }
85369@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85370 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85371 const struct sockaddr *src)
85372 {
85373- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85374+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85375 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85376
85377 dsin->sin_family = ssin->sin_family;
85378@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85379 if (sa->sa_family != AF_INET6)
85380 return 0;
85381
85382- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85383+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85384 }
85385
85386 #endif /* _LINUX_SUNRPC_ADDR_H */
85387diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85388index 598ba80..d90cba6 100644
85389--- a/include/linux/sunrpc/clnt.h
85390+++ b/include/linux/sunrpc/clnt.h
85391@@ -100,7 +100,7 @@ struct rpc_procinfo {
85392 unsigned int p_timer; /* Which RTT timer to use */
85393 u32 p_statidx; /* Which procedure to account */
85394 const char * p_name; /* name of procedure */
85395-};
85396+} __do_const;
85397
85398 #ifdef __KERNEL__
85399
85400diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85401index 6f22cfe..9fd0909 100644
85402--- a/include/linux/sunrpc/svc.h
85403+++ b/include/linux/sunrpc/svc.h
85404@@ -420,7 +420,7 @@ struct svc_procedure {
85405 unsigned int pc_count; /* call count */
85406 unsigned int pc_cachetype; /* cache info (NFS) */
85407 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85408-};
85409+} __do_const;
85410
85411 /*
85412 * Function prototypes.
85413diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85414index 975da75..318c083 100644
85415--- a/include/linux/sunrpc/svc_rdma.h
85416+++ b/include/linux/sunrpc/svc_rdma.h
85417@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85418 extern unsigned int svcrdma_max_requests;
85419 extern unsigned int svcrdma_max_req_size;
85420
85421-extern atomic_t rdma_stat_recv;
85422-extern atomic_t rdma_stat_read;
85423-extern atomic_t rdma_stat_write;
85424-extern atomic_t rdma_stat_sq_starve;
85425-extern atomic_t rdma_stat_rq_starve;
85426-extern atomic_t rdma_stat_rq_poll;
85427-extern atomic_t rdma_stat_rq_prod;
85428-extern atomic_t rdma_stat_sq_poll;
85429-extern atomic_t rdma_stat_sq_prod;
85430+extern atomic_unchecked_t rdma_stat_recv;
85431+extern atomic_unchecked_t rdma_stat_read;
85432+extern atomic_unchecked_t rdma_stat_write;
85433+extern atomic_unchecked_t rdma_stat_sq_starve;
85434+extern atomic_unchecked_t rdma_stat_rq_starve;
85435+extern atomic_unchecked_t rdma_stat_rq_poll;
85436+extern atomic_unchecked_t rdma_stat_rq_prod;
85437+extern atomic_unchecked_t rdma_stat_sq_poll;
85438+extern atomic_unchecked_t rdma_stat_sq_prod;
85439
85440 #define RPCRDMA_VERSION 1
85441
85442diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85443index 8d71d65..f79586e 100644
85444--- a/include/linux/sunrpc/svcauth.h
85445+++ b/include/linux/sunrpc/svcauth.h
85446@@ -120,7 +120,7 @@ struct auth_ops {
85447 int (*release)(struct svc_rqst *rq);
85448 void (*domain_release)(struct auth_domain *);
85449 int (*set_client)(struct svc_rqst *rq);
85450-};
85451+} __do_const;
85452
85453 #define SVC_GARBAGE 1
85454 #define SVC_SYSERR 2
85455diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85456index e7a018e..49f8b17 100644
85457--- a/include/linux/swiotlb.h
85458+++ b/include/linux/swiotlb.h
85459@@ -60,7 +60,8 @@ extern void
85460
85461 extern void
85462 swiotlb_free_coherent(struct device *hwdev, size_t size,
85463- void *vaddr, dma_addr_t dma_handle);
85464+ void *vaddr, dma_addr_t dma_handle,
85465+ struct dma_attrs *attrs);
85466
85467 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85468 unsigned long offset, size_t size,
85469diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85470index 85893d7..4923581 100644
85471--- a/include/linux/syscalls.h
85472+++ b/include/linux/syscalls.h
85473@@ -99,10 +99,16 @@ union bpf_attr;
85474 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85475
85476 #define __SC_DECL(t, a) t a
85477+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85478 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85479 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85480 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85481-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85482+#define __SC_LONG(t, a) __typeof( \
85483+ __builtin_choose_expr( \
85484+ sizeof(t) > sizeof(int), \
85485+ (t) 0, \
85486+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85487+ )) a
85488 #define __SC_CAST(t, a) (t) a
85489 #define __SC_ARGS(t, a) a
85490 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85491@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
85492 asmlinkage long sys_fsync(unsigned int fd);
85493 asmlinkage long sys_fdatasync(unsigned int fd);
85494 asmlinkage long sys_bdflush(int func, long data);
85495-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85496- char __user *type, unsigned long flags,
85497+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85498+ const char __user *type, unsigned long flags,
85499 void __user *data);
85500-asmlinkage long sys_umount(char __user *name, int flags);
85501-asmlinkage long sys_oldumount(char __user *name);
85502+asmlinkage long sys_umount(const char __user *name, int flags);
85503+asmlinkage long sys_oldumount(const char __user *name);
85504 asmlinkage long sys_truncate(const char __user *path, long length);
85505 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85506 asmlinkage long sys_stat(const char __user *filename,
85507@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85508 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85509 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85510 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85511- struct sockaddr __user *, int);
85512+ struct sockaddr __user *, int) __intentional_overflow(0);
85513 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85514 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85515 unsigned int vlen, unsigned flags);
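
The reworked __SC_LONG widens narrow syscall arguments through 0UL instead of 0L whenever the declared type is unsigned (the new __TYPE_IS_U test), so a 32-bit length with its top bit set is zero-extended rather than sign-extended on its way through the long-typed syscall stubs. A userspace illustration of the difference (assumes two's complement):

#include <stdio.h>

int main(void)
{
        unsigned int len = 0x80000000u;           /* top bit set */
        long as_signed = (long)(int)len;          /* old 0L path */
        unsigned long as_unsigned = len;          /* new 0UL path */

        /* prints: -2147483648 vs 2147483648 */
        printf("%ld vs %lu\n", as_signed, as_unsigned);
        return 0;
}
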
85516diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85517index 27b3b0b..e093dd9 100644
85518--- a/include/linux/syscore_ops.h
85519+++ b/include/linux/syscore_ops.h
85520@@ -16,7 +16,7 @@ struct syscore_ops {
85521 int (*suspend)(void);
85522 void (*resume)(void);
85523 void (*shutdown)(void);
85524-};
85525+} __do_const;
85526
85527 extern void register_syscore_ops(struct syscore_ops *ops);
85528 extern void unregister_syscore_ops(struct syscore_ops *ops);
85529diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85530index b7361f8..341a15a 100644
85531--- a/include/linux/sysctl.h
85532+++ b/include/linux/sysctl.h
85533@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85534
85535 extern int proc_dostring(struct ctl_table *, int,
85536 void __user *, size_t *, loff_t *);
85537+extern int proc_dostring_modpriv(struct ctl_table *, int,
85538+ void __user *, size_t *, loff_t *);
85539 extern int proc_dointvec(struct ctl_table *, int,
85540 void __user *, size_t *, loff_t *);
85541 extern int proc_dointvec_minmax(struct ctl_table *, int,
85542@@ -113,7 +115,8 @@ struct ctl_table
85543 struct ctl_table_poll *poll;
85544 void *extra1;
85545 void *extra2;
85546-};
85547+} __do_const __randomize_layout;
85548+typedef struct ctl_table __no_const ctl_table_no_const;
85549
85550 struct ctl_node {
85551 struct rb_node node;
85552diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85553index ddad161..a3efd26 100644
85554--- a/include/linux/sysfs.h
85555+++ b/include/linux/sysfs.h
85556@@ -34,7 +34,8 @@ struct attribute {
85557 struct lock_class_key *key;
85558 struct lock_class_key skey;
85559 #endif
85560-};
85561+} __do_const;
85562+typedef struct attribute __no_const attribute_no_const;
85563
85564 /**
85565 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85566@@ -63,7 +64,8 @@ struct attribute_group {
85567 struct attribute *, int);
85568 struct attribute **attrs;
85569 struct bin_attribute **bin_attrs;
85570-};
85571+} __do_const;
85572+typedef struct attribute_group __no_const attribute_group_no_const;
85573
85574 /**
85575 * Use these macros to make defining attributes easier. See include/linux/device.h
85576@@ -137,7 +139,8 @@ struct bin_attribute {
85577 char *, loff_t, size_t);
85578 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85579 struct vm_area_struct *vma);
85580-};
85581+} __do_const;
85582+typedef struct bin_attribute __no_const bin_attribute_no_const;
85583
85584 /**
85585 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85586diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85587index 387fa7d..3fcde6b 100644
85588--- a/include/linux/sysrq.h
85589+++ b/include/linux/sysrq.h
85590@@ -16,6 +16,7 @@
85591
85592 #include <linux/errno.h>
85593 #include <linux/types.h>
85594+#include <linux/compiler.h>
85595
85596 /* Possible values of bitmask for enabling sysrq functions */
85597 /* 0x0001 is reserved for enable everything */
85598@@ -33,7 +34,7 @@ struct sysrq_key_op {
85599 char *help_msg;
85600 char *action_msg;
85601 int enable_mask;
85602-};
85603+} __do_const;
85604
85605 #ifdef CONFIG_MAGIC_SYSRQ
85606
85607diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85608index ff307b5..f1a4468 100644
85609--- a/include/linux/thread_info.h
85610+++ b/include/linux/thread_info.h
85611@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85612 #error "no set_restore_sigmask() provided and default one won't work"
85613 #endif
85614
85615+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85616+
85617+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85618+{
85619+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85620+}
85621+
85622 #endif /* __KERNEL__ */
85623
85624 #endif /* _LINUX_THREAD_INFO_H */
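
check_object_size() is the PAX_USERCOPY entry point: arch uaccess paths call it before each copy so __check_object_size() can verify that the kernel side of a user copy stays within a single slab object, stack frame, or other whitelisted region, with __builtin_constant_p(n) letting constant-size copies take a cheaper path. A sketch of a call site, assuming it matches the arch uaccess changes elsewhere in this patch (the wrapper name is illustrative):

static inline unsigned long
demo_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);       /* to_user == true */
        return copy_to_user(to, from, n);
}
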
85625diff --git a/include/linux/tty.h b/include/linux/tty.h
85626index 7d66ae5..0327149 100644
85627--- a/include/linux/tty.h
85628+++ b/include/linux/tty.h
85629@@ -202,7 +202,7 @@ struct tty_port {
85630 const struct tty_port_operations *ops; /* Port operations */
85631 spinlock_t lock; /* Lock protecting tty field */
85632 int blocked_open; /* Waiting to open */
85633- int count; /* Usage count */
85634+ atomic_t count; /* Usage count */
85635 wait_queue_head_t open_wait; /* Open waiters */
85636 wait_queue_head_t close_wait; /* Close waiters */
85637 wait_queue_head_t delta_msr_wait; /* Modem status change */
85638@@ -290,7 +290,7 @@ struct tty_struct {
85639 /* If the tty has a pending do_SAK, queue it here - akpm */
85640 struct work_struct SAK_work;
85641 struct tty_port *port;
85642-};
85643+} __randomize_layout;
85644
85645 /* Each of a tty's open files has private_data pointing to tty_file_private */
85646 struct tty_file_private {
85647@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85648 struct tty_struct *tty, struct file *filp);
85649 static inline int tty_port_users(struct tty_port *port)
85650 {
85651- return port->count + port->blocked_open;
85652+ return atomic_read(&port->count) + port->blocked_open;
85653 }
85654
85655 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
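
With tty_port.count now an atomic_t, the driver open/close paths patched later switch from plain integer arithmetic to the atomic helpers, matching the atomic_read() already used in the accessor above. Illustrative before/after at a hypothetical call site:

/* open path: port->count++ becomes */
atomic_inc(&port->count);

/* close path: if (--port->count == 0) becomes */
if (atomic_dec_return(&port->count) == 0) {
        /* last user gone: tear the port down */
}
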
85656diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85657index 92e337c..f46757b 100644
85658--- a/include/linux/tty_driver.h
85659+++ b/include/linux/tty_driver.h
85660@@ -291,7 +291,7 @@ struct tty_operations {
85661 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85662 #endif
85663 const struct file_operations *proc_fops;
85664-};
85665+} __do_const __randomize_layout;
85666
85667 struct tty_driver {
85668 int magic; /* magic number for this structure */
85669@@ -325,7 +325,7 @@ struct tty_driver {
85670
85671 const struct tty_operations *ops;
85672 struct list_head tty_drivers;
85673-};
85674+} __randomize_layout;
85675
85676 extern struct list_head tty_drivers;
85677
85678diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85679index 00c9d68..bc0188b 100644
85680--- a/include/linux/tty_ldisc.h
85681+++ b/include/linux/tty_ldisc.h
85682@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85683
85684 struct module *owner;
85685
85686- int refcount;
85687+ atomic_t refcount;
85688 };
85689
85690 struct tty_ldisc {
85691diff --git a/include/linux/types.h b/include/linux/types.h
85692index a0bb704..f511c77 100644
85693--- a/include/linux/types.h
85694+++ b/include/linux/types.h
85695@@ -177,10 +177,26 @@ typedef struct {
85696 int counter;
85697 } atomic_t;
85698
85699+#ifdef CONFIG_PAX_REFCOUNT
85700+typedef struct {
85701+ int counter;
85702+} atomic_unchecked_t;
85703+#else
85704+typedef atomic_t atomic_unchecked_t;
85705+#endif
85706+
85707 #ifdef CONFIG_64BIT
85708 typedef struct {
85709 long counter;
85710 } atomic64_t;
85711+
85712+#ifdef CONFIG_PAX_REFCOUNT
85713+typedef struct {
85714+ long counter;
85715+} atomic64_unchecked_t;
85716+#else
85717+typedef atomic64_t atomic64_unchecked_t;
85718+#endif
85719 #endif
85720
85721 struct list_head {
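
This type pair underpins every atomic_t to atomic_unchecked_t conversion in this section. Under PAX_REFCOUNT the regular atomic ops gain overflow detection to stop reference-count overflow exploits, so counters that may legitimately wrap (statistics, generation numbers, sequence ids) move to the layout-compatible _unchecked type, whose helpers skip the check. A minimal sketch of one helper, assuming it mirrors the per-arch asm/atomic.h changes in this patch; the gcc builtin stands in for the arch-specific asm:

#ifdef CONFIG_PAX_REFCOUNT
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* plain wrapping increment, no overflow trap */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
#else
#define atomic_inc_unchecked(v) atomic_inc(v)
#endif
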
85722diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85723index ecd3319..8a36ded 100644
85724--- a/include/linux/uaccess.h
85725+++ b/include/linux/uaccess.h
85726@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85727 long ret; \
85728 mm_segment_t old_fs = get_fs(); \
85729 \
85730- set_fs(KERNEL_DS); \
85731 pagefault_disable(); \
85732- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85733- pagefault_enable(); \
85734+ set_fs(KERNEL_DS); \
85735+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85736 set_fs(old_fs); \
85737+ pagefault_enable(); \
85738 ret; \
85739 })
85740
85741diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85742index 2d1f9b6..d7a9fce 100644
85743--- a/include/linux/uidgid.h
85744+++ b/include/linux/uidgid.h
85745@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85746
85747 #endif /* CONFIG_USER_NS */
85748
85749+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85750+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85751+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85752+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85753+
85754 #endif /* _LINUX_UIDGID_H */
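
These helpers give the grsecurity core a uniform way to map namespaced kuid_t/kgid_t values back to init-namespace ids for logging and policy checks. Illustrative use at a hypothetical logging site:

kuid_t uid = current_uid();

if (gr_is_global_nonroot(uid))
        pr_info("request denied for uid %u\n", GR_GLOBAL_UID(uid));
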
85755diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85756index 32c0e83..671eb35 100644
85757--- a/include/linux/uio_driver.h
85758+++ b/include/linux/uio_driver.h
85759@@ -67,7 +67,7 @@ struct uio_device {
85760 struct module *owner;
85761 struct device *dev;
85762 int minor;
85763- atomic_t event;
85764+ atomic_unchecked_t event;
85765 struct fasync_struct *async_queue;
85766 wait_queue_head_t wait;
85767 struct uio_info *info;
85768diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85769index 99c1b4d..562e6f3 100644
85770--- a/include/linux/unaligned/access_ok.h
85771+++ b/include/linux/unaligned/access_ok.h
85772@@ -4,34 +4,34 @@
85773 #include <linux/kernel.h>
85774 #include <asm/byteorder.h>
85775
85776-static inline u16 get_unaligned_le16(const void *p)
85777+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85778 {
85779- return le16_to_cpup((__le16 *)p);
85780+ return le16_to_cpup((const __le16 *)p);
85781 }
85782
85783-static inline u32 get_unaligned_le32(const void *p)
85784+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85785 {
85786- return le32_to_cpup((__le32 *)p);
85787+ return le32_to_cpup((const __le32 *)p);
85788 }
85789
85790-static inline u64 get_unaligned_le64(const void *p)
85791+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85792 {
85793- return le64_to_cpup((__le64 *)p);
85794+ return le64_to_cpup((const __le64 *)p);
85795 }
85796
85797-static inline u16 get_unaligned_be16(const void *p)
85798+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85799 {
85800- return be16_to_cpup((__be16 *)p);
85801+ return be16_to_cpup((const __be16 *)p);
85802 }
85803
85804-static inline u32 get_unaligned_be32(const void *p)
85805+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85806 {
85807- return be32_to_cpup((__be32 *)p);
85808+ return be32_to_cpup((const __be32 *)p);
85809 }
85810
85811-static inline u64 get_unaligned_be64(const void *p)
85812+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85813 {
85814- return be64_to_cpup((__be64 *)p);
85815+ return be64_to_cpup((const __be64 *)p);
85816 }
85817
85818 static inline void put_unaligned_le16(u16 val, void *p)
85819diff --git a/include/linux/usb.h b/include/linux/usb.h
85820index 058a769..c17a1c2c 100644
85821--- a/include/linux/usb.h
85822+++ b/include/linux/usb.h
85823@@ -566,7 +566,7 @@ struct usb_device {
85824 int maxchild;
85825
85826 u32 quirks;
85827- atomic_t urbnum;
85828+ atomic_unchecked_t urbnum;
85829
85830 unsigned long active_duration;
85831
85832@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85833
85834 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85835 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85836- void *data, __u16 size, int timeout);
85837+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85838 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85839 void *data, int len, int *actual_length, int timeout);
85840 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85841diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85842index 9fd9e48..e2c5f35 100644
85843--- a/include/linux/usb/renesas_usbhs.h
85844+++ b/include/linux/usb/renesas_usbhs.h
85845@@ -39,7 +39,7 @@ enum {
85846 */
85847 struct renesas_usbhs_driver_callback {
85848 int (*notify_hotplug)(struct platform_device *pdev);
85849-};
85850+} __no_const;
85851
85852 /*
85853 * callback functions for platform
85854diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85855index 8297e5b..0dfae27 100644
85856--- a/include/linux/user_namespace.h
85857+++ b/include/linux/user_namespace.h
85858@@ -39,7 +39,7 @@ struct user_namespace {
85859 struct key *persistent_keyring_register;
85860 struct rw_semaphore persistent_keyring_register_sem;
85861 #endif
85862-};
85863+} __randomize_layout;
85864
85865 extern struct user_namespace init_user_ns;
85866
85867diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85868index 5093f58..c103e58 100644
85869--- a/include/linux/utsname.h
85870+++ b/include/linux/utsname.h
85871@@ -25,7 +25,7 @@ struct uts_namespace {
85872 struct new_utsname name;
85873 struct user_namespace *user_ns;
85874 struct ns_common ns;
85875-};
85876+} __randomize_layout;
85877 extern struct uts_namespace init_uts_ns;
85878
85879 #ifdef CONFIG_UTS_NS
85880diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85881index 6f8fbcf..4efc177 100644
85882--- a/include/linux/vermagic.h
85883+++ b/include/linux/vermagic.h
85884@@ -25,9 +25,42 @@
85885 #define MODULE_ARCH_VERMAGIC ""
85886 #endif
85887
85888+#ifdef CONFIG_PAX_REFCOUNT
85889+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85890+#else
85891+#define MODULE_PAX_REFCOUNT ""
85892+#endif
85893+
85894+#ifdef CONSTIFY_PLUGIN
85895+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85896+#else
85897+#define MODULE_CONSTIFY_PLUGIN ""
85898+#endif
85899+
85900+#ifdef STACKLEAK_PLUGIN
85901+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85902+#else
85903+#define MODULE_STACKLEAK_PLUGIN ""
85904+#endif
85905+
85906+#ifdef RANDSTRUCT_PLUGIN
85907+#include <generated/randomize_layout_hash.h>
85908+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85909+#else
85910+#define MODULE_RANDSTRUCT_PLUGIN
85911+#endif
85912+
85913+#ifdef CONFIG_GRKERNSEC
85914+#define MODULE_GRSEC "GRSEC "
85915+#else
85916+#define MODULE_GRSEC ""
85917+#endif
85918+
85919 #define VERMAGIC_STRING \
85920 UTS_RELEASE " " \
85921 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
85922 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
85923- MODULE_ARCH_VERMAGIC
85924+ MODULE_ARCH_VERMAGIC \
85925+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
85926+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
85927
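
Each feature contributes a token to VERMAGIC_STRING, so a module built without the matching plugins or GRKERNSEC configuration fails the vermagic check at load time instead of running with mismatched structure layouts or missing instrumentation. A hypothetical result for a build with everything enabled:

/* vermagic: 3.19.3-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN
 *           STACKLEAK_PLUGIN GRSEC RANDSTRUCT_PLUGIN_<hashed seed>
 *
 * RANDSTRUCT appends the hashed layout seed, so two RANDSTRUCT kernels
 * reject each other's modules unless their seeds match. */
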
85928diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
85929index b483abd..af305ad 100644
85930--- a/include/linux/vga_switcheroo.h
85931+++ b/include/linux/vga_switcheroo.h
85932@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
85933
85934 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
85935
85936-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
85937+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
85938 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
85939-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85940+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
85941 #else
85942
85943 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
85944@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
85945
85946 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
85947
85948-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85949+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85950 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85951-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85952+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85953
85954 #endif
85955 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
85956diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
85957index b87696f..1d11de7 100644
85958--- a/include/linux/vmalloc.h
85959+++ b/include/linux/vmalloc.h
85960@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
85961 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
85962 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
85963 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
85964+
85965+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85966+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
85967+#endif
85968+
85969 /* bits [20..32] reserved for arch specific ioremap internals */
85970
85971 /*
85972@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
85973 unsigned long flags, pgprot_t prot);
85974 extern void vunmap(const void *addr);
85975
85976+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85977+extern void unmap_process_stacks(struct task_struct *task);
85978+#endif
85979+
85980 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
85981 unsigned long uaddr, void *kaddr,
85982 unsigned long size);
85983@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
85984
85985 /* for /dev/kmem */
85986 extern long vread(char *buf, char *addr, unsigned long count);
85987-extern long vwrite(char *buf, char *addr, unsigned long count);
85988+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
85989
85990 /*
85991 * Internals. Dont't use..
85992diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
85993index 82e7db7..f8ce3d0 100644
85994--- a/include/linux/vmstat.h
85995+++ b/include/linux/vmstat.h
85996@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
85997 /*
85998 * Zone based page accounting with per cpu differentials.
85999 */
86000-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86001+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86002
86003 static inline void zone_page_state_add(long x, struct zone *zone,
86004 enum zone_stat_item item)
86005 {
86006- atomic_long_add(x, &zone->vm_stat[item]);
86007- atomic_long_add(x, &vm_stat[item]);
86008+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86009+ atomic_long_add_unchecked(x, &vm_stat[item]);
86010 }
86011
86012-static inline unsigned long global_page_state(enum zone_stat_item item)
86013+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86014 {
86015- long x = atomic_long_read(&vm_stat[item]);
86016+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86017 #ifdef CONFIG_SMP
86018 if (x < 0)
86019 x = 0;
86020@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86021 return x;
86022 }
86023
86024-static inline unsigned long zone_page_state(struct zone *zone,
86025+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86026 enum zone_stat_item item)
86027 {
86028- long x = atomic_long_read(&zone->vm_stat[item]);
86029+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86030 #ifdef CONFIG_SMP
86031 if (x < 0)
86032 x = 0;
86033@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86034 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86035 enum zone_stat_item item)
86036 {
86037- long x = atomic_long_read(&zone->vm_stat[item]);
86038+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86039
86040 #ifdef CONFIG_SMP
86041 int cpu;
86042@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86043
86044 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86045 {
86046- atomic_long_inc(&zone->vm_stat[item]);
86047- atomic_long_inc(&vm_stat[item]);
86048+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86049+ atomic_long_inc_unchecked(&vm_stat[item]);
86050 }
86051
86052 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86053 {
86054- atomic_long_dec(&zone->vm_stat[item]);
86055- atomic_long_dec(&vm_stat[item]);
86056+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86057+ atomic_long_dec_unchecked(&vm_stat[item]);
86058 }
86059
86060 static inline void __inc_zone_page_state(struct page *page,
86061diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86062index 91b0a68..0e9adf6 100644
86063--- a/include/linux/xattr.h
86064+++ b/include/linux/xattr.h
86065@@ -28,7 +28,7 @@ struct xattr_handler {
86066 size_t size, int handler_flags);
86067 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86068 size_t size, int flags, int handler_flags);
86069-};
86070+} __do_const;
86071
86072 struct xattr {
86073 const char *name;
86074@@ -37,6 +37,9 @@ struct xattr {
86075 };
86076
86077 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86078+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86079+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86080+#endif
86081 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86082 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86083 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86084diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86085index 92dbbd3..13ab0b3 100644
86086--- a/include/linux/zlib.h
86087+++ b/include/linux/zlib.h
86088@@ -31,6 +31,7 @@
86089 #define _ZLIB_H
86090
86091 #include <linux/zconf.h>
86092+#include <linux/compiler.h>
86093
86094 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86095 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86096@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86097
86098 /* basic functions */
86099
86100-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86101+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86102 /*
86103 Returns the number of bytes that needs to be allocated for a per-
86104 stream workspace with the specified parameters. A pointer to this
86105diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86106index eb76cfd..9fd0e7c 100644
86107--- a/include/media/v4l2-dev.h
86108+++ b/include/media/v4l2-dev.h
86109@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86110 int (*mmap) (struct file *, struct vm_area_struct *);
86111 int (*open) (struct file *);
86112 int (*release) (struct file *);
86113-};
86114+} __do_const;
86115
86116 /*
86117 * Newer version of video_device, handled by videodev2.c
86118diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86119index ffb69da..040393e 100644
86120--- a/include/media/v4l2-device.h
86121+++ b/include/media/v4l2-device.h
86122@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86123 this function returns 0. If the name ends with a digit (e.g. cx18),
86124 then the name will be set to cx18-0 since cx180 looks really odd. */
86125 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86126- atomic_t *instance);
86127+ atomic_unchecked_t *instance);
86128
86129 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86130 Since the parent disappears this ensures that v4l2_dev doesn't have an
86131diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86132index 2a25dec..bf6dd8a 100644
86133--- a/include/net/9p/transport.h
86134+++ b/include/net/9p/transport.h
86135@@ -62,7 +62,7 @@ struct p9_trans_module {
86136 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86137 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86138 char *, char *, int , int, int, int);
86139-};
86140+} __do_const;
86141
86142 void v9fs_register_trans(struct p9_trans_module *m);
86143 void v9fs_unregister_trans(struct p9_trans_module *m);
86144diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86145index a175ba4..196eb8242 100644
86146--- a/include/net/af_unix.h
86147+++ b/include/net/af_unix.h
86148@@ -36,7 +36,7 @@ struct unix_skb_parms {
86149 u32 secid; /* Security ID */
86150 #endif
86151 u32 consumed;
86152-};
86153+} __randomize_layout;
86154
86155 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86156 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86157diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86158index d1bb342..e12f7d2 100644
86159--- a/include/net/bluetooth/l2cap.h
86160+++ b/include/net/bluetooth/l2cap.h
86161@@ -608,7 +608,7 @@ struct l2cap_ops {
86162 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86163 unsigned long hdr_len,
86164 unsigned long len, int nb);
86165-};
86166+} __do_const;
86167
86168 struct l2cap_conn {
86169 struct hci_conn *hcon;
86170diff --git a/include/net/bonding.h b/include/net/bonding.h
86171index 983a94b..7aa9b16 100644
86172--- a/include/net/bonding.h
86173+++ b/include/net/bonding.h
86174@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86175
86176 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86177 {
86178- atomic_long_inc(&dev->tx_dropped);
86179+ atomic_long_inc_unchecked(&dev->tx_dropped);
86180 dev_kfree_skb_any(skb);
86181 }
86182
86183diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86184index f2ae33d..c457cf0 100644
86185--- a/include/net/caif/cfctrl.h
86186+++ b/include/net/caif/cfctrl.h
86187@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86188 void (*radioset_rsp)(void);
86189 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86190 struct cflayer *client_layer);
86191-};
86192+} __no_const;
86193
86194 /* Link Setup Parameters for CAIF-Links. */
86195 struct cfctrl_link_param {
86196@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86197 struct cfctrl {
86198 struct cfsrvl serv;
86199 struct cfctrl_rsp res;
86200- atomic_t req_seq_no;
86201- atomic_t rsp_seq_no;
86202+ atomic_unchecked_t req_seq_no;
86203+ atomic_unchecked_t rsp_seq_no;
86204 struct list_head list;
86205 /* Protects from simultaneous access to first_req list */
86206 spinlock_t info_list_lock;
86207diff --git a/include/net/flow.h b/include/net/flow.h
86208index 8109a15..504466d 100644
86209--- a/include/net/flow.h
86210+++ b/include/net/flow.h
86211@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86212
86213 void flow_cache_flush(struct net *net);
86214 void flow_cache_flush_deferred(struct net *net);
86215-extern atomic_t flow_cache_genid;
86216+extern atomic_unchecked_t flow_cache_genid;
86217
86218 #endif
86219diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86220index 6c92415..3a352d8 100644
86221--- a/include/net/genetlink.h
86222+++ b/include/net/genetlink.h
86223@@ -130,7 +130,7 @@ struct genl_ops {
86224 u8 cmd;
86225 u8 internal_flags;
86226 u8 flags;
86227-};
86228+} __do_const;
86229
86230 int __genl_register_family(struct genl_family *family);
86231
86232diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86233index 734d9b5..48a9a4b 100644
86234--- a/include/net/gro_cells.h
86235+++ b/include/net/gro_cells.h
86236@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86237 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86238
86239 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86240- atomic_long_inc(&dev->rx_dropped);
86241+ atomic_long_inc_unchecked(&dev->rx_dropped);
86242 kfree_skb(skb);
86243 return;
86244 }
86245diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86246index 848e85c..051c7de 100644
86247--- a/include/net/inet_connection_sock.h
86248+++ b/include/net/inet_connection_sock.h
86249@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86250 int (*bind_conflict)(const struct sock *sk,
86251 const struct inet_bind_bucket *tb, bool relax);
86252 void (*mtu_reduced)(struct sock *sk);
86253-};
86254+} __do_const;
86255
86256 /** inet_connection_sock - INET connection oriented sock
86257 *
86258diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86259index 80479ab..0c3f647 100644
86260--- a/include/net/inetpeer.h
86261+++ b/include/net/inetpeer.h
86262@@ -47,7 +47,7 @@ struct inet_peer {
86263 */
86264 union {
86265 struct {
86266- atomic_t rid; /* Frag reception counter */
86267+ atomic_unchecked_t rid; /* Frag reception counter */
86268 };
86269 struct rcu_head rcu;
86270 struct inet_peer *gc_next;
86271diff --git a/include/net/ip.h b/include/net/ip.h
86272index 09cf5ae..ab62fcf 100644
86273--- a/include/net/ip.h
86274+++ b/include/net/ip.h
86275@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86276 }
86277 }
86278
86279-u32 ip_idents_reserve(u32 hash, int segs);
86280+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86281 void __ip_select_ident(struct iphdr *iph, int segs);
86282
86283 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86284diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86285index 09a819e..3ab9e14 100644
86286--- a/include/net/ip_fib.h
86287+++ b/include/net/ip_fib.h
86288@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86289
86290 #define FIB_RES_SADDR(net, res) \
86291 ((FIB_RES_NH(res).nh_saddr_genid == \
86292- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86293+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86294 FIB_RES_NH(res).nh_saddr : \
86295 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86296 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86297diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86298index 615b20b..fd4cbd8 100644
86299--- a/include/net/ip_vs.h
86300+++ b/include/net/ip_vs.h
86301@@ -534,7 +534,7 @@ struct ip_vs_conn {
86302 struct ip_vs_conn *control; /* Master control connection */
86303 atomic_t n_control; /* Number of controlled ones */
86304 struct ip_vs_dest *dest; /* real server */
86305- atomic_t in_pkts; /* incoming packet counter */
86306+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86307
86308 /* Packet transmitter for different forwarding methods. If it
86309 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86310@@ -682,7 +682,7 @@ struct ip_vs_dest {
86311 __be16 port; /* port number of the server */
86312 union nf_inet_addr addr; /* IP address of the server */
86313 volatile unsigned int flags; /* dest status flags */
86314- atomic_t conn_flags; /* flags to copy to conn */
86315+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86316 atomic_t weight; /* server weight */
86317
86318 atomic_t refcnt; /* reference counter */
86319@@ -928,11 +928,11 @@ struct netns_ipvs {
86320 /* ip_vs_lblc */
86321 int sysctl_lblc_expiration;
86322 struct ctl_table_header *lblc_ctl_header;
86323- struct ctl_table *lblc_ctl_table;
86324+ ctl_table_no_const *lblc_ctl_table;
86325 /* ip_vs_lblcr */
86326 int sysctl_lblcr_expiration;
86327 struct ctl_table_header *lblcr_ctl_header;
86328- struct ctl_table *lblcr_ctl_table;
86329+ ctl_table_no_const *lblcr_ctl_table;
86330 /* ip_vs_est */
86331 struct list_head est_list; /* estimator list */
86332 spinlock_t est_lock;
86333diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86334index 8d4f588..2e37ad2 100644
86335--- a/include/net/irda/ircomm_tty.h
86336+++ b/include/net/irda/ircomm_tty.h
86337@@ -33,6 +33,7 @@
86338 #include <linux/termios.h>
86339 #include <linux/timer.h>
86340 #include <linux/tty.h> /* struct tty_struct */
86341+#include <asm/local.h>
86342
86343 #include <net/irda/irias_object.h>
86344 #include <net/irda/ircomm_core.h>
86345diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86346index 714cc9a..ea05f3e 100644
86347--- a/include/net/iucv/af_iucv.h
86348+++ b/include/net/iucv/af_iucv.h
86349@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86350 struct iucv_sock_list {
86351 struct hlist_head head;
86352 rwlock_t lock;
86353- atomic_t autobind_name;
86354+ atomic_unchecked_t autobind_name;
86355 };
86356
86357 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86358diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86359index f3be818..bf46196 100644
86360--- a/include/net/llc_c_ac.h
86361+++ b/include/net/llc_c_ac.h
86362@@ -87,7 +87,7 @@
86363 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86364 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86365
86366-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86367+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86368
86369 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86370 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86371diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86372index 3948cf1..83b28c4 100644
86373--- a/include/net/llc_c_ev.h
86374+++ b/include/net/llc_c_ev.h
86375@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86376 return (struct llc_conn_state_ev *)skb->cb;
86377 }
86378
86379-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86380-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86381+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86382+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86383
86384 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86385 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86386diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86387index 48f3f89..0e92c50 100644
86388--- a/include/net/llc_c_st.h
86389+++ b/include/net/llc_c_st.h
86390@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86391 u8 next_state;
86392 const llc_conn_ev_qfyr_t *ev_qualifiers;
86393 const llc_conn_action_t *ev_actions;
86394-};
86395+} __do_const;
86396
86397 struct llc_conn_state {
86398 u8 current_state;
86399diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86400index a61b98c..aade1eb 100644
86401--- a/include/net/llc_s_ac.h
86402+++ b/include/net/llc_s_ac.h
86403@@ -23,7 +23,7 @@
86404 #define SAP_ACT_TEST_IND 9
86405
86406 /* All action functions must look like this */
86407-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86408+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86409
86410 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86411 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86412diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86413index c4359e2..76dbc4a 100644
86414--- a/include/net/llc_s_st.h
86415+++ b/include/net/llc_s_st.h
86416@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86417 llc_sap_ev_t ev;
86418 u8 next_state;
86419 const llc_sap_action_t *ev_actions;
86420-};
86421+} __do_const;
86422
86423 struct llc_sap_state {
86424 u8 curr_state;
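
Baking const into the LLC function-pointer typedefs makes every action/event table declared with them an array of const pointers, which the compiler emits into .rodata without each table needing its own const qualifier; the __do_const on the transition structs above completes the write protection. Minimal illustration with a hypothetical table:

static int demo_action(struct sock *sk, struct sk_buff *skb)
{
        return 0;
}

/* elements have type int (* const)(...), so the array is read-only */
static llc_conn_action_t demo_actions[] = {
        demo_action,
        NULL,
};
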
86425diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86426index 29c7be8..746bd73 100644
86427--- a/include/net/mac80211.h
86428+++ b/include/net/mac80211.h
86429@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86430 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86431
86432 u32 (*get_expected_throughput)(void *priv_sta);
86433-};
86434+} __do_const;
86435
86436 static inline int rate_supported(struct ieee80211_sta *sta,
86437 enum ieee80211_band band,
86438diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86439index 76f7084..8f36e39 100644
86440--- a/include/net/neighbour.h
86441+++ b/include/net/neighbour.h
86442@@ -163,7 +163,7 @@ struct neigh_ops {
86443 void (*error_report)(struct neighbour *, struct sk_buff *);
86444 int (*output)(struct neighbour *, struct sk_buff *);
86445 int (*connected_output)(struct neighbour *, struct sk_buff *);
86446-};
86447+} __do_const;
86448
86449 struct pneigh_entry {
86450 struct pneigh_entry *next;
86451@@ -217,7 +217,7 @@ struct neigh_table {
86452 struct neigh_statistics __percpu *stats;
86453 struct neigh_hash_table __rcu *nht;
86454 struct pneigh_entry **phash_buckets;
86455-};
86456+} __randomize_layout;
86457
86458 enum {
86459 NEIGH_ARP_TABLE = 0,
86460diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86461index 2e8756b8..0bd0083 100644
86462--- a/include/net/net_namespace.h
86463+++ b/include/net/net_namespace.h
86464@@ -130,8 +130,8 @@ struct net {
86465 struct netns_ipvs *ipvs;
86466 #endif
86467 struct sock *diag_nlsk;
86468- atomic_t fnhe_genid;
86469-};
86470+ atomic_unchecked_t fnhe_genid;
86471+} __randomize_layout;
86472
86473 #include <linux/seq_file_net.h>
86474
86475@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86476 #define __net_init __init
86477 #define __net_exit __exit_refok
86478 #define __net_initdata __initdata
86479+#ifdef CONSTIFY_PLUGIN
86480 #define __net_initconst __initconst
86481+#else
86482+#define __net_initconst __initdata
86483+#endif
86484 #endif
86485
86486 struct pernet_operations {
86487@@ -297,7 +301,7 @@ struct pernet_operations {
86488 void (*exit_batch)(struct list_head *net_exit_list);
86489 int *id;
86490 size_t size;
86491-};
86492+} __do_const;
86493
86494 /*
86495 * Use these carefully. If you implement a network device and it
86496@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86497
86498 static inline int rt_genid_ipv4(struct net *net)
86499 {
86500- return atomic_read(&net->ipv4.rt_genid);
86501+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86502 }
86503
86504 static inline void rt_genid_bump_ipv4(struct net *net)
86505 {
86506- atomic_inc(&net->ipv4.rt_genid);
86507+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86508 }
86509
86510 extern void (*__fib6_flush_trees)(struct net *net);
86511@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86512
86513 static inline int fnhe_genid(struct net *net)
86514 {
86515- return atomic_read(&net->fnhe_genid);
86516+ return atomic_read_unchecked(&net->fnhe_genid);
86517 }
86518
86519 static inline void fnhe_genid_bump(struct net *net)
86520 {
86521- atomic_inc(&net->fnhe_genid);
86522+ atomic_inc_unchecked(&net->fnhe_genid);
86523 }
86524
86525 #endif /* __NET_NET_NAMESPACE_H */
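
The rt_genid/fnhe_genid counters are the textbook "allowed to wrap" case for atomic_unchecked_t: bumping one invalidates every cached entry at once and an eventual wrap is harmless, so the PAX_REFCOUNT trap would only yield false positives. The __net_initconst fallback likewise degrades to __initdata when the constify plugin is absent, keeping section placement consistent with whether the data is actually const. Illustrative generation-check pattern at a hypothetical caller:

int genid = rt_genid_ipv4(net);

if (cached->genid != genid) {
        rebuild_cached_route(cached);   /* hypothetical helper */
        cached->genid = genid;
}
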
86526diff --git a/include/net/netlink.h b/include/net/netlink.h
86527index 6415835..ab96d87 100644
86528--- a/include/net/netlink.h
86529+++ b/include/net/netlink.h
86530@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86531 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86532 {
86533 if (mark)
86534- skb_trim(skb, (unsigned char *) mark - skb->data);
86535+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86536 }
86537
86538 /**
86539diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86540index 29d6a94..235d3d84 100644
86541--- a/include/net/netns/conntrack.h
86542+++ b/include/net/netns/conntrack.h
86543@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86544 struct nf_proto_net {
86545 #ifdef CONFIG_SYSCTL
86546 struct ctl_table_header *ctl_table_header;
86547- struct ctl_table *ctl_table;
86548+ ctl_table_no_const *ctl_table;
86549 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86550 struct ctl_table_header *ctl_compat_header;
86551- struct ctl_table *ctl_compat_table;
86552+ ctl_table_no_const *ctl_compat_table;
86553 #endif
86554 #endif
86555 unsigned int users;
86556@@ -60,7 +60,7 @@ struct nf_ip_net {
86557 struct nf_icmp_net icmpv6;
86558 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86559 struct ctl_table_header *ctl_table_header;
86560- struct ctl_table *ctl_table;
86561+ ctl_table_no_const *ctl_table;
86562 #endif
86563 };
86564
86565diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86566index 0ffef1a..2ce1ceb 100644
86567--- a/include/net/netns/ipv4.h
86568+++ b/include/net/netns/ipv4.h
86569@@ -84,7 +84,7 @@ struct netns_ipv4 {
86570
86571 struct ping_group_range ping_group_range;
86572
86573- atomic_t dev_addr_genid;
86574+ atomic_unchecked_t dev_addr_genid;
86575
86576 #ifdef CONFIG_SYSCTL
86577 unsigned long *sysctl_local_reserved_ports;
86578@@ -98,6 +98,6 @@ struct netns_ipv4 {
86579 struct fib_rules_ops *mr_rules_ops;
86580 #endif
86581 #endif
86582- atomic_t rt_genid;
86583+ atomic_unchecked_t rt_genid;
86584 };
86585 #endif
86586diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86587index 69ae41f..4f94868 100644
86588--- a/include/net/netns/ipv6.h
86589+++ b/include/net/netns/ipv6.h
86590@@ -75,8 +75,8 @@ struct netns_ipv6 {
86591 struct fib_rules_ops *mr6_rules_ops;
86592 #endif
86593 #endif
86594- atomic_t dev_addr_genid;
86595- atomic_t fib6_sernum;
86596+ atomic_unchecked_t dev_addr_genid;
86597+ atomic_unchecked_t fib6_sernum;
86598 };
86599
86600 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86601diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86602index 730d82a..045f2c4 100644
86603--- a/include/net/netns/xfrm.h
86604+++ b/include/net/netns/xfrm.h
86605@@ -78,7 +78,7 @@ struct netns_xfrm {
86606
86607 /* flow cache part */
86608 struct flow_cache flow_cache_global;
86609- atomic_t flow_cache_genid;
86610+ atomic_unchecked_t flow_cache_genid;
86611 struct list_head flow_cache_gc_list;
86612 spinlock_t flow_cache_gc_lock;
86613 struct work_struct flow_cache_gc_work;
86614diff --git a/include/net/ping.h b/include/net/ping.h
86615index f074060..830fba0 100644
86616--- a/include/net/ping.h
86617+++ b/include/net/ping.h
86618@@ -54,7 +54,7 @@ struct ping_iter_state {
86619
86620 extern struct proto ping_prot;
86621 #if IS_ENABLED(CONFIG_IPV6)
86622-extern struct pingv6_ops pingv6_ops;
86623+extern struct pingv6_ops *pingv6_ops;
86624 #endif
86625
86626 struct pingfakehdr {
86627diff --git a/include/net/protocol.h b/include/net/protocol.h
86628index d6fcc1f..ca277058 100644
86629--- a/include/net/protocol.h
86630+++ b/include/net/protocol.h
86631@@ -49,7 +49,7 @@ struct net_protocol {
86632 * socket lookup?
86633 */
86634 icmp_strict_tag_validation:1;
86635-};
86636+} __do_const;
86637
86638 #if IS_ENABLED(CONFIG_IPV6)
86639 struct inet6_protocol {
86640@@ -62,7 +62,7 @@ struct inet6_protocol {
86641 u8 type, u8 code, int offset,
86642 __be32 info);
86643 unsigned int flags; /* INET6_PROTO_xxx */
86644-};
86645+} __do_const;
86646
86647 #define INET6_PROTO_NOPOLICY 0x1
86648 #define INET6_PROTO_FINAL 0x2
86649diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86650index e21b9f9..0191ef0 100644
86651--- a/include/net/rtnetlink.h
86652+++ b/include/net/rtnetlink.h
86653@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86654 int (*fill_slave_info)(struct sk_buff *skb,
86655 const struct net_device *dev,
86656 const struct net_device *slave_dev);
86657-};
86658+} __do_const;
86659
86660 int __rtnl_link_register(struct rtnl_link_ops *ops);
86661 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86662diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86663index 4a5b9a3..ca27d73 100644
86664--- a/include/net/sctp/checksum.h
86665+++ b/include/net/sctp/checksum.h
86666@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86667 unsigned int offset)
86668 {
86669 struct sctphdr *sh = sctp_hdr(skb);
86670- __le32 ret, old = sh->checksum;
86671- const struct skb_checksum_ops ops = {
86672+ __le32 ret, old = sh->checksum;
86673+ static const struct skb_checksum_ops ops = {
86674 .update = sctp_csum_update,
86675 .combine = sctp_csum_combine,
86676 };
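
The added static keeps the const ops structure from being rebuilt on the kernel stack at every call, where its function pointers would sit in writable memory; as a file-scope const object it is emitted once into .rodata, which KERNEXEC/constify can actually protect. Minimal illustration with a hypothetical ops struct:

struct demo_ops {
        int (*update)(int);
};

static int add_one(int x)
{
        return x + 1;
}

int demo(int v)
{
        static const struct demo_ops ops = { .update = add_one };

        return ops.update(v);
}
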
86677diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86678index 487ef34..d457f98 100644
86679--- a/include/net/sctp/sm.h
86680+++ b/include/net/sctp/sm.h
86681@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86682 typedef struct {
86683 sctp_state_fn_t *fn;
86684 const char *name;
86685-} sctp_sm_table_entry_t;
86686+} __do_const sctp_sm_table_entry_t;
86687
86688 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86689 * currently in use.
86690@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86691 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86692
86693 /* Extern declarations for major data structures. */
86694-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86695+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86696
86697
86698 /* Get the size of a DATA chunk payload. */
86699diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86700index 2bb2fcf..d17c291 100644
86701--- a/include/net/sctp/structs.h
86702+++ b/include/net/sctp/structs.h
86703@@ -509,7 +509,7 @@ struct sctp_pf {
86704 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86705 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86706 struct sctp_af *af;
86707-};
86708+} __do_const;
86709
86710
86711 /* Structure to track chunk fragments that have been acked, but peer
86712diff --git a/include/net/sock.h b/include/net/sock.h
86713index 2210fec..2249ad0 100644
86714--- a/include/net/sock.h
86715+++ b/include/net/sock.h
86716@@ -362,7 +362,7 @@ struct sock {
86717 unsigned int sk_napi_id;
86718 unsigned int sk_ll_usec;
86719 #endif
86720- atomic_t sk_drops;
86721+ atomic_unchecked_t sk_drops;
86722 int sk_rcvbuf;
86723
86724 struct sk_filter __rcu *sk_filter;
86725@@ -1061,7 +1061,7 @@ struct proto {
86726 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86727 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86728 #endif
86729-};
86730+} __randomize_layout;
86731
86732 /*
86733 * Bits in struct cg_proto.flags
86734@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86735 page_counter_uncharge(&prot->memory_allocated, amt);
86736 }
86737
86738-static inline long
86739+static inline long __intentional_overflow(-1)
86740 sk_memory_allocated(const struct sock *sk)
86741 {
86742 struct proto *prot = sk->sk_prot;
86743@@ -1385,7 +1385,7 @@ struct sock_iocb {
86744 struct scm_cookie *scm;
86745 struct msghdr *msg, async_msg;
86746 struct kiocb *kiocb;
86747-};
86748+} __randomize_layout;
86749
86750 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86751 {
86752@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86753 }
86754
86755 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86756- char __user *from, char *to,
86757+ char __user *from, unsigned char *to,
86758 int copy, int offset)
86759 {
86760 if (skb->ip_summed == CHECKSUM_NONE) {
86761@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86762 }
86763 }
86764
86765-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86766+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86767
86768 /**
86769 * sk_page_frag - return an appropriate page_frag
86770diff --git a/include/net/tcp.h b/include/net/tcp.h
86771index 9d9111e..349c847 100644
86772--- a/include/net/tcp.h
86773+++ b/include/net/tcp.h
86774@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86775 void tcp_xmit_retransmit_queue(struct sock *);
86776 void tcp_simple_retransmit(struct sock *);
86777 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86778-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86779+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86780
86781 void tcp_send_probe0(struct sock *);
86782 void tcp_send_partial(struct sock *);
86783@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86784 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86785 */
86786 struct tcp_skb_cb {
86787- __u32 seq; /* Starting sequence number */
86788- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86789+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86790+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86791 union {
86792 /* Note : tcp_tw_isn is used in input path only
86793 * (isn chosen by tcp_timewait_state_process())
86794@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86795
86796 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86797 /* 1 byte hole */
86798- __u32 ack_seq; /* Sequence number ACK'd */
86799+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86800 union {
86801 struct inet_skb_parm h4;
86802 #if IS_ENABLED(CONFIG_IPV6)
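
The __intentional_overflow(0) markings on seq, end_seq and ack_seq whitelist these fields for the size_overflow plugin: TCP sequence numbers live in a 32-bit ring and all comparisons are deliberately wraparound-aware, in the style of the before() helper that tcp.h already defines. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-aware "is seq1 earlier than seq2": subtract in modular
 * 32-bit space and look at the sign bit, as include/net/tcp.h does. */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
    uint32_t a = 0xfffffff0u;   /* just before the 2^32 wrap */
    uint32_t b = 0x00000010u;   /* just after it             */

    printf("%d\n", seq_before(a, b));   /* 1: a precedes b   */
    printf("%d\n", seq_before(b, a));   /* 0                 */
    return 0;
}
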
86803diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86804index dc4865e..152ee4c 100644
86805--- a/include/net/xfrm.h
86806+++ b/include/net/xfrm.h
86807@@ -285,7 +285,6 @@ struct xfrm_dst;
86808 struct xfrm_policy_afinfo {
86809 unsigned short family;
86810 struct dst_ops *dst_ops;
86811- void (*garbage_collect)(struct net *net);
86812 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86813 const xfrm_address_t *saddr,
86814 const xfrm_address_t *daddr);
86815@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86816 struct net_device *dev,
86817 const struct flowi *fl);
86818 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86819-};
86820+} __do_const;
86821
86822 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86823 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86824@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86825 int (*transport_finish)(struct sk_buff *skb,
86826 int async);
86827 void (*local_error)(struct sk_buff *skb, u32 mtu);
86828-};
86829+} __do_const;
86830
86831 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86832 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86833@@ -437,7 +436,7 @@ struct xfrm_mode {
86834 struct module *owner;
86835 unsigned int encap;
86836 int flags;
86837-};
86838+} __do_const;
86839
86840 /* Flags for xfrm_mode. */
86841 enum {
86842@@ -534,7 +533,7 @@ struct xfrm_policy {
86843 struct timer_list timer;
86844
86845 struct flow_cache_object flo;
86846- atomic_t genid;
86847+ atomic_unchecked_t genid;
86848 u32 priority;
86849 u32 index;
86850 struct xfrm_mark mark;
86851@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86852 }
86853
86854 void xfrm_garbage_collect(struct net *net);
86855+void xfrm_garbage_collect_deferred(struct net *net);
86856
86857 #else
86858
86859@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86860 static inline void xfrm_garbage_collect(struct net *net)
86861 {
86862 }
86863+static inline void xfrm_garbage_collect_deferred(struct net *net)
86864+{
86865+}
86866 #endif
86867
86868 static __inline__
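
Removing the garbage_collect callback from xfrm_policy_afinfo is what lets the struct become __do_const above: once nothing in the table is written at runtime it can live in read-only memory, and the mutable hook is replaced by the fixed xfrm_garbage_collect_deferred() entry point (stubbed out when XFRM is off). The trade, reduced to a toy example with hypothetical names:

#include <stdio.h>

/* Before: a writable hook in the ops table kept it out of .rodata.
 * After: the table is const and callers use a fixed entry point. */

static void flush_stale_entries(void)    /* hypothetical worker */
{
    puts("flushing stale policy entries");
}

struct afinfo_sketch {
    unsigned short family;
    /* no gc function pointer any more */
};

static const struct afinfo_sketch ipv4_afinfo = { .family = 2 };

void gc_deferred(void)                   /* fixed, exported symbol */
{
    flush_stale_entries();
}

int main(void)
{
    printf("family %u, const table\n", ipv4_afinfo.family);
    gc_deferred();
    return 0;
}
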
86869diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86870index 1017e0b..227aa4d 100644
86871--- a/include/rdma/iw_cm.h
86872+++ b/include/rdma/iw_cm.h
86873@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86874 int backlog);
86875
86876 int (*destroy_listen)(struct iw_cm_id *cm_id);
86877-};
86878+} __no_const;
86879
86880 /**
86881 * iw_create_cm_id - Create an IW CM identifier.
86882diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86883index 93d14da..734b3d8 100644
86884--- a/include/scsi/libfc.h
86885+++ b/include/scsi/libfc.h
86886@@ -771,6 +771,7 @@ struct libfc_function_template {
86887 */
86888 void (*disc_stop_final) (struct fc_lport *);
86889 };
86890+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86891
86892 /**
86893 * struct fc_disc - Discovery context
86894@@ -875,7 +876,7 @@ struct fc_lport {
86895 struct fc_vport *vport;
86896
86897 /* Operational Information */
86898- struct libfc_function_template tt;
86899+ libfc_function_template_no_const tt;
86900 u8 link_up;
86901 u8 qfull;
86902 enum fc_lport_state state;
86903diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86904index 3a4edd1..feb2e3e 100644
86905--- a/include/scsi/scsi_device.h
86906+++ b/include/scsi/scsi_device.h
86907@@ -185,9 +185,9 @@ struct scsi_device {
86908 unsigned int max_device_blocked; /* what device_blocked counts down from */
86909 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86910
86911- atomic_t iorequest_cnt;
86912- atomic_t iodone_cnt;
86913- atomic_t ioerr_cnt;
86914+ atomic_unchecked_t iorequest_cnt;
86915+ atomic_unchecked_t iodone_cnt;
86916+ atomic_unchecked_t ioerr_cnt;
86917
86918 struct device sdev_gendev,
86919 sdev_dev;
86920diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
86921index 007a0bc..7188db8 100644
86922--- a/include/scsi/scsi_transport_fc.h
86923+++ b/include/scsi/scsi_transport_fc.h
86924@@ -756,7 +756,8 @@ struct fc_function_template {
86925 unsigned long show_host_system_hostname:1;
86926
86927 unsigned long disable_target_scan:1;
86928-};
86929+} __do_const;
86930+typedef struct fc_function_template __no_const fc_function_template_no_const;
86931
86932
86933 /**
86934diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
86935index 396e8f7..b037e89 100644
86936--- a/include/sound/compress_driver.h
86937+++ b/include/sound/compress_driver.h
86938@@ -129,7 +129,7 @@ struct snd_compr_ops {
86939 struct snd_compr_caps *caps);
86940 int (*get_codec_caps) (struct snd_compr_stream *stream,
86941 struct snd_compr_codec_caps *codec);
86942-};
86943+} __no_const;
86944
86945 /**
86946 * struct snd_compr: Compressed device
86947diff --git a/include/sound/soc.h b/include/sound/soc.h
86948index ac8b333..59c3692 100644
86949--- a/include/sound/soc.h
86950+++ b/include/sound/soc.h
86951@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
86952 enum snd_soc_dapm_type, int);
86953
86954 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
86955-};
86956+} __do_const;
86957
86958 /* SoC platform interface */
86959 struct snd_soc_platform_driver {
86960@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
86961 const struct snd_compr_ops *compr_ops;
86962
86963 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
86964-};
86965+} __do_const;
86966
86967 struct snd_soc_dai_link_component {
86968 const char *name;
86969diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
86970index 672150b..9d4bec4 100644
86971--- a/include/target/target_core_base.h
86972+++ b/include/target/target_core_base.h
86973@@ -767,7 +767,7 @@ struct se_device {
86974 atomic_long_t write_bytes;
86975 /* Active commands on this virtual SE device */
86976 atomic_t simple_cmds;
86977- atomic_t dev_ordered_id;
86978+ atomic_unchecked_t dev_ordered_id;
86979 atomic_t dev_ordered_sync;
86980 atomic_t dev_qf_count;
86981 int export_count;
86982diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
86983new file mode 100644
86984index 0000000..fb634b7
86985--- /dev/null
86986+++ b/include/trace/events/fs.h
86987@@ -0,0 +1,53 @@
86988+#undef TRACE_SYSTEM
86989+#define TRACE_SYSTEM fs
86990+
86991+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
86992+#define _TRACE_FS_H
86993+
86994+#include <linux/fs.h>
86995+#include <linux/tracepoint.h>
86996+
86997+TRACE_EVENT(do_sys_open,
86998+
86999+ TP_PROTO(const char *filename, int flags, int mode),
87000+
87001+ TP_ARGS(filename, flags, mode),
87002+
87003+ TP_STRUCT__entry(
87004+ __string( filename, filename )
87005+ __field( int, flags )
87006+ __field( int, mode )
87007+ ),
87008+
87009+ TP_fast_assign(
87010+ __assign_str(filename, filename);
87011+ __entry->flags = flags;
87012+ __entry->mode = mode;
87013+ ),
87014+
87015+ TP_printk("\"%s\" %x %o",
87016+ __get_str(filename), __entry->flags, __entry->mode)
87017+);
87018+
87019+TRACE_EVENT(open_exec,
87020+
87021+ TP_PROTO(const char *filename),
87022+
87023+ TP_ARGS(filename),
87024+
87025+ TP_STRUCT__entry(
87026+ __string( filename, filename )
87027+ ),
87028+
87029+ TP_fast_assign(
87030+ __assign_str(filename, filename);
87031+ ),
87032+
87033+ TP_printk("\"%s\"",
87034+ __get_str(filename))
87035+);
87036+
87037+#endif /* _TRACE_FS_H */
87038+
87039+/* This part must be outside protection */
87040+#include <trace/define_trace.h>
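
The new include/trace/events/fs.h above defines do_sys_open and open_exec tracepoints with the standard TRACE_EVENT() machinery; later hunks in this patch presumably call the generated trace_do_sys_open()/trace_open_exec() from the open and exec paths. Once the kernel is built with them, the events appear under tracefs; a small consumer, assuming tracefs is mounted at /sys/kernel/tracing (older systems use /sys/kernel/debug/tracing):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Path follows the TRACE_SYSTEM/event layout: events/fs/do_sys_open */
    int fd = open("/sys/kernel/tracing/events/fs/do_sys_open/enable",
                  O_WRONLY);
    if (fd < 0) {
        perror("enable");
        return 1;
    }
    if (write(fd, "1", 1) != 1)
        perror("write");
    close(fd);

    FILE *pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
    if (!pipe) {
        perror("trace_pipe");
        return 1;
    }
    char line[512];
    while (fgets(line, sizeof(line), pipe))   /* Ctrl-C to stop */
        fputs(line, stdout);
    fclose(pipe);
    return 0;
}

Each emitted line ends with the TP_printk format above, i.e. the quoted filename followed by flags in hex and mode in octal.
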
87041diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87042index 3608beb..df39d8a 100644
87043--- a/include/trace/events/irq.h
87044+++ b/include/trace/events/irq.h
87045@@ -36,7 +36,7 @@ struct softirq_action;
87046 */
87047 TRACE_EVENT(irq_handler_entry,
87048
87049- TP_PROTO(int irq, struct irqaction *action),
87050+ TP_PROTO(int irq, const struct irqaction *action),
87051
87052 TP_ARGS(irq, action),
87053
87054@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87055 */
87056 TRACE_EVENT(irq_handler_exit,
87057
87058- TP_PROTO(int irq, struct irqaction *action, int ret),
87059+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87060
87061 TP_ARGS(irq, action, ret),
87062
87063diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87064index 7caf44c..23c6f27 100644
87065--- a/include/uapi/linux/a.out.h
87066+++ b/include/uapi/linux/a.out.h
87067@@ -39,6 +39,14 @@ enum machine_type {
87068 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87069 };
87070
87071+/* Constants for the N_FLAGS field */
87072+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87073+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87074+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87075+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87076+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87077+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87078+
87079 #if !defined (N_MAGIC)
87080 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87081 #endif
87082diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87083index 22b6ad3..aeba37e 100644
87084--- a/include/uapi/linux/bcache.h
87085+++ b/include/uapi/linux/bcache.h
87086@@ -5,6 +5,7 @@
87087 * Bcache on disk data structures
87088 */
87089
87090+#include <linux/compiler.h>
87091 #include <asm/types.h>
87092
87093 #define BITMASK(name, type, field, offset, size) \
87094@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87095 /* Btree keys - all units are in sectors */
87096
87097 struct bkey {
87098- __u64 high;
87099- __u64 low;
87100+ __u64 high __intentional_overflow(-1);
87101+ __u64 low __intentional_overflow(-1);
87102 __u64 ptr[];
87103 };
87104
87105diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87106index d876736..ccce5c0 100644
87107--- a/include/uapi/linux/byteorder/little_endian.h
87108+++ b/include/uapi/linux/byteorder/little_endian.h
87109@@ -42,51 +42,51 @@
87110
87111 static inline __le64 __cpu_to_le64p(const __u64 *p)
87112 {
87113- return (__force __le64)*p;
87114+ return (__force const __le64)*p;
87115 }
87116-static inline __u64 __le64_to_cpup(const __le64 *p)
87117+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87118 {
87119- return (__force __u64)*p;
87120+ return (__force const __u64)*p;
87121 }
87122 static inline __le32 __cpu_to_le32p(const __u32 *p)
87123 {
87124- return (__force __le32)*p;
87125+ return (__force const __le32)*p;
87126 }
87127 static inline __u32 __le32_to_cpup(const __le32 *p)
87128 {
87129- return (__force __u32)*p;
87130+ return (__force const __u32)*p;
87131 }
87132 static inline __le16 __cpu_to_le16p(const __u16 *p)
87133 {
87134- return (__force __le16)*p;
87135+ return (__force const __le16)*p;
87136 }
87137 static inline __u16 __le16_to_cpup(const __le16 *p)
87138 {
87139- return (__force __u16)*p;
87140+ return (__force const __u16)*p;
87141 }
87142 static inline __be64 __cpu_to_be64p(const __u64 *p)
87143 {
87144- return (__force __be64)__swab64p(p);
87145+ return (__force const __be64)__swab64p(p);
87146 }
87147 static inline __u64 __be64_to_cpup(const __be64 *p)
87148 {
87149- return __swab64p((__u64 *)p);
87150+ return __swab64p((const __u64 *)p);
87151 }
87152 static inline __be32 __cpu_to_be32p(const __u32 *p)
87153 {
87154- return (__force __be32)__swab32p(p);
87155+ return (__force const __be32)__swab32p(p);
87156 }
87157-static inline __u32 __be32_to_cpup(const __be32 *p)
87158+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87159 {
87160- return __swab32p((__u32 *)p);
87161+ return __swab32p((const __u32 *)p);
87162 }
87163 static inline __be16 __cpu_to_be16p(const __u16 *p)
87164 {
87165- return (__force __be16)__swab16p(p);
87166+ return (__force const __be16)__swab16p(p);
87167 }
87168 static inline __u16 __be16_to_cpup(const __be16 *p)
87169 {
87170- return __swab16p((__u16 *)p);
87171+ return __swab16p((const __u16 *)p);
87172 }
87173 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87174 #define __le64_to_cpus(x) do { (void)(x); } while (0)
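
Here __intentional_overflow(-1) whitelists the return value of the endian-conversion helpers: reassembling bytes with shifts and masks is modular arithmetic by construction, and the size_overflow plugin must not flag it. For reference, the classic open-coded fallback that __builtin_bswap32 replaces:

#include <stdint.h>
#include <stdio.h>

/* Open-coded 32-bit byte swap, the pre-__builtin_bswap32 fallback. */
static uint32_t swab32(uint32_t x)
{
    return ((x & 0x000000ffu) << 24) |
           ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) |
           ((x & 0xff000000u) >> 24);
}

int main(void)
{
    printf("%08x\n", swab32(0x12345678u));   /* prints 78563412 */
    return 0;
}
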
87175diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87176index 71e1d0e..6cc9caf 100644
87177--- a/include/uapi/linux/elf.h
87178+++ b/include/uapi/linux/elf.h
87179@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87180 #define PT_GNU_EH_FRAME 0x6474e550
87181
87182 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87183+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87184+
87185+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87186+
87187+/* Constants for the e_flags field */
87188+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87189+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87190+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87191+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87192+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87193+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87194
87195 /*
87196 * Extended Numbering
87197@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87198 #define DT_DEBUG 21
87199 #define DT_TEXTREL 22
87200 #define DT_JMPREL 23
87201+#define DT_FLAGS 30
87202+ #define DF_TEXTREL 0x00000004
87203 #define DT_ENCODING 32
87204 #define OLD_DT_LOOS 0x60000000
87205 #define DT_LOOS 0x6000000d
87206@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87207 #define PF_W 0x2
87208 #define PF_X 0x1
87209
87210+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87211+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87212+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87213+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87214+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87215+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87216+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87217+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87218+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87219+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87220+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87221+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87222+
87223 typedef struct elf32_phdr{
87224 Elf32_Word p_type;
87225 Elf32_Off p_offset;
87226@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87227 #define EI_OSABI 7
87228 #define EI_PAD 8
87229
87230+#define EI_PAX 14
87231+
87232 #define ELFMAG0 0x7f /* EI_MAG */
87233 #define ELFMAG1 'E'
87234 #define ELFMAG2 'L'
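
PT_PAX_FLAGS introduces a dedicated program header whose p_flags word carries the PF_PAGEEXEC/PF_NOPAGEEXEC etc. bits defined above; the ELF loader inspects it at execve() time to pick per-binary PaX policy. A userspace sketch that finds the header in a 64-bit binary and prints the raw flags (error handling trimmed):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)   /* as in the hunk above */

int main(int argc, char **argv)
{
    if (argc != 2)
        return 1;
    FILE *f = fopen(argv[1], "rb");
    if (!f)
        return 1;

    Elf64_Ehdr eh;
    if (fread(&eh, sizeof(eh), 1, f) != 1)
        return 1;

    for (int i = 0; i < eh.e_phnum; i++) {
        Elf64_Phdr ph;

        fseek(f, (long)(eh.e_phoff + (unsigned long)i * eh.e_phentsize),
              SEEK_SET);
        if (fread(&ph, sizeof(ph), 1, f) != 1)
            break;
        if (ph.p_type == PT_PAX_FLAGS)
            printf("PT_PAX_FLAGS p_flags = 0x%x\n", (unsigned)ph.p_flags);
    }
    fclose(f);
    return 0;
}
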
87235diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87236index aa169c4..6a2771d 100644
87237--- a/include/uapi/linux/personality.h
87238+++ b/include/uapi/linux/personality.h
87239@@ -30,6 +30,7 @@ enum {
87240 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87241 ADDR_NO_RANDOMIZE | \
87242 ADDR_COMPAT_LAYOUT | \
87243+ ADDR_LIMIT_3GB | \
87244 MMAP_PAGE_ZERO)
87245
87246 /*
87247diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87248index 7530e74..e714828 100644
87249--- a/include/uapi/linux/screen_info.h
87250+++ b/include/uapi/linux/screen_info.h
87251@@ -43,7 +43,8 @@ struct screen_info {
87252 __u16 pages; /* 0x32 */
87253 __u16 vesa_attributes; /* 0x34 */
87254 __u32 capabilities; /* 0x36 */
87255- __u8 _reserved[6]; /* 0x3a */
87256+ __u16 vesapm_size; /* 0x3a */
87257+ __u8 _reserved[4]; /* 0x3c */
87258 } __attribute__((packed));
87259
87260 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87261diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87262index 0e011eb..82681b1 100644
87263--- a/include/uapi/linux/swab.h
87264+++ b/include/uapi/linux/swab.h
87265@@ -43,7 +43,7 @@
87266 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87267 */
87268
87269-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87270+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87271 {
87272 #ifdef __HAVE_BUILTIN_BSWAP16__
87273 return __builtin_bswap16(val);
87274@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87275 #endif
87276 }
87277
87278-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87279+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87280 {
87281 #ifdef __HAVE_BUILTIN_BSWAP32__
87282 return __builtin_bswap32(val);
87283@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87284 #endif
87285 }
87286
87287-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87288+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87289 {
87290 #ifdef __HAVE_BUILTIN_BSWAP64__
87291 return __builtin_bswap64(val);
87292diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87293index 1590c49..5eab462 100644
87294--- a/include/uapi/linux/xattr.h
87295+++ b/include/uapi/linux/xattr.h
87296@@ -73,5 +73,9 @@
87297 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87298 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87299
87300+/* User namespace */
87301+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87302+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87303+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87304
87305 #endif /* _UAPI_LINUX_XATTR_H */
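
XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", the extended attribute that grsecurity can consult as an alternative to ELF-embedded PaX markings. It is ordinary user-namespace xattr data, so setfattr/getfattr or setxattr(2) manage it; a minimal sketch, with the single-letter flag convention (lowercase disables a feature, paxctl-style) stated as an assumption:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

int main(int argc, char **argv)
{
    if (argc != 2)
        return 1;

    /* Assumption: 'm' disables MPROTECT for this binary, following
     * the paxctl letter scheme. */
    const char *flags = "m";

    if (setxattr(argv[1], XATTR_NAME_PAX_FLAGS, flags, strlen(flags), 0)) {
        perror("setxattr");
        return 1;
    }

    char buf[32];
    ssize_t n = getxattr(argv[1], XATTR_NAME_PAX_FLAGS, buf,
                         sizeof(buf) - 1);
    if (n >= 0) {
        buf[n] = '\0';
        printf("%s = %s\n", XATTR_NAME_PAX_FLAGS, buf);
    }
    return 0;
}

The filesystem must support user xattrs for this to stick, which is why the ELF program-header marking remains the more portable mechanism.
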
87306diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87307index f9466fa..f4e2b81 100644
87308--- a/include/video/udlfb.h
87309+++ b/include/video/udlfb.h
87310@@ -53,10 +53,10 @@ struct dlfb_data {
87311 u32 pseudo_palette[256];
87312 int blank_mode; /*one of FB_BLANK_ */
87313 /* blit-only rendering path metrics, exposed through sysfs */
87314- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87315- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87316- atomic_t bytes_sent; /* to usb, after compression including overhead */
87317- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87318+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87319+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87320+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87321+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87322 };
87323
87324 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87325diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87326index 30f5362..8ed8ac9 100644
87327--- a/include/video/uvesafb.h
87328+++ b/include/video/uvesafb.h
87329@@ -122,6 +122,7 @@ struct uvesafb_par {
87330 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87331 u8 pmi_setpal; /* PMI for palette changes */
87332 u16 *pmi_base; /* protected mode interface location */
87333+ u8 *pmi_code; /* protected mode code location */
87334 void *pmi_start;
87335 void *pmi_pal;
87336 u8 *vbe_state_orig; /*
87337diff --git a/init/Kconfig b/init/Kconfig
87338index 9afb971..27d6fca 100644
87339--- a/init/Kconfig
87340+++ b/init/Kconfig
87341@@ -1129,6 +1129,7 @@ endif # CGROUPS
87342
87343 config CHECKPOINT_RESTORE
87344 bool "Checkpoint/restore support" if EXPERT
87345+ depends on !GRKERNSEC
87346 default n
87347 help
87348 Enables additional kernel features in a sake of checkpoint/restore.
87349@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87350
87351 config COMPAT_BRK
87352 bool "Disable heap randomization"
87353- default y
87354+ default n
87355 help
87356 Randomizing heap placement makes heap exploits harder, but it
87357 also breaks ancient binaries (including anything libc5 based).
87358@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87359 config STOP_MACHINE
87360 bool
87361 default y
87362- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87363+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87364 help
87365 Need stop_machine() primitive.
87366
87367diff --git a/init/Makefile b/init/Makefile
87368index 7bc47ee..6da2dc7 100644
87369--- a/init/Makefile
87370+++ b/init/Makefile
87371@@ -2,6 +2,9 @@
87372 # Makefile for the linux kernel.
87373 #
87374
87375+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87376+asflags-y := $(GCC_PLUGINS_AFLAGS)
87377+
87378 obj-y := main.o version.o mounts.o
87379 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87380 obj-y += noinitramfs.o
87381diff --git a/init/do_mounts.c b/init/do_mounts.c
87382index eb41008..f5dbbf9 100644
87383--- a/init/do_mounts.c
87384+++ b/init/do_mounts.c
87385@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87386 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87387 {
87388 struct super_block *s;
87389- int err = sys_mount(name, "/root", fs, flags, data);
87390+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87391 if (err)
87392 return err;
87393
87394- sys_chdir("/root");
87395+ sys_chdir((const char __force_user *)"/root");
87396 s = current->fs->pwd.dentry->d_sb;
87397 ROOT_DEV = s->s_dev;
87398 printk(KERN_INFO
87399@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87400 va_start(args, fmt);
87401 vsprintf(buf, fmt, args);
87402 va_end(args);
87403- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87404+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87405 if (fd >= 0) {
87406 sys_ioctl(fd, FDEJECT, 0);
87407 sys_close(fd);
87408 }
87409 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87410- fd = sys_open("/dev/console", O_RDWR, 0);
87411+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87412 if (fd >= 0) {
87413 sys_ioctl(fd, TCGETS, (long)&termios);
87414 termios.c_lflag &= ~ICANON;
87415 sys_ioctl(fd, TCSETSF, (long)&termios);
87416- sys_read(fd, &c, 1);
87417+ sys_read(fd, (char __user *)&c, 1);
87418 termios.c_lflag |= ICANON;
87419 sys_ioctl(fd, TCSETSF, (long)&termios);
87420 sys_close(fd);
87421@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87422 mount_root();
87423 out:
87424 devtmpfs_mount("dev");
87425- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87426- sys_chroot(".");
87427+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87428+ sys_chroot((const char __force_user *)".");
87429 }
87430
87431 static bool is_tmpfs;
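
The (char __force_user *) casts that dominate the init/ hunks exist because under UDEREF/USERCOPY the __user annotation is enforced as a real sparse address space rather than documentation: early-boot code calling sys_mount(), sys_open() and friends with kernel strings must explicitly force the conversion. Roughly what the annotations amount to when checking is enabled (a simplification of linux/compiler.h; grsecurity's __force_user is in effect __force plus __user):

#include <stdio.h>

#ifdef __CHECKER__                 /* defined by sparse */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Stub standing in for a syscall that takes a user pointer. */
static long sys_chdir_sketch(const char __user *path)
{
    (void)path;
    return 0;
}

int main(void)
{
    /* Without the __force cast, sparse flags the address-space
     * mismatch; with it, the caller asserts the conversion is
     * intentional - the role of __force_user in the hunks above. */
    long ret = sys_chdir_sketch((const char __force __user *)"/root");

    printf("ret=%ld\n", ret);
    return 0;
}
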
87432diff --git a/init/do_mounts.h b/init/do_mounts.h
87433index f5b978a..69dbfe8 100644
87434--- a/init/do_mounts.h
87435+++ b/init/do_mounts.h
87436@@ -15,15 +15,15 @@ extern int root_mountflags;
87437
87438 static inline int create_dev(char *name, dev_t dev)
87439 {
87440- sys_unlink(name);
87441- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87442+ sys_unlink((char __force_user *)name);
87443+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87444 }
87445
87446 #if BITS_PER_LONG == 32
87447 static inline u32 bstat(char *name)
87448 {
87449 struct stat64 stat;
87450- if (sys_stat64(name, &stat) != 0)
87451+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87452 return 0;
87453 if (!S_ISBLK(stat.st_mode))
87454 return 0;
87455@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87456 static inline u32 bstat(char *name)
87457 {
87458 struct stat stat;
87459- if (sys_newstat(name, &stat) != 0)
87460+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87461 return 0;
87462 if (!S_ISBLK(stat.st_mode))
87463 return 0;
87464diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87465index 3e0878e..8a9d7a0 100644
87466--- a/init/do_mounts_initrd.c
87467+++ b/init/do_mounts_initrd.c
87468@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87469 {
87470 sys_unshare(CLONE_FS | CLONE_FILES);
87471 /* stdin/stdout/stderr for /linuxrc */
87472- sys_open("/dev/console", O_RDWR, 0);
87473+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87474 sys_dup(0);
87475 sys_dup(0);
87476 /* move initrd over / and chdir/chroot in initrd root */
87477- sys_chdir("/root");
87478- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87479- sys_chroot(".");
87480+ sys_chdir((const char __force_user *)"/root");
87481+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87482+ sys_chroot((const char __force_user *)".");
87483 sys_setsid();
87484 return 0;
87485 }
87486@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87487 create_dev("/dev/root.old", Root_RAM0);
87488 /* mount initrd on rootfs' /root */
87489 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87490- sys_mkdir("/old", 0700);
87491- sys_chdir("/old");
87492+ sys_mkdir((const char __force_user *)"/old", 0700);
87493+ sys_chdir((const char __force_user *)"/old");
87494
87495 /* try loading default modules from initrd */
87496 load_default_modules();
87497@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87498 current->flags &= ~PF_FREEZER_SKIP;
87499
87500 /* move initrd to rootfs' /old */
87501- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87502+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87503 /* switch root and cwd back to / of rootfs */
87504- sys_chroot("..");
87505+ sys_chroot((const char __force_user *)"..");
87506
87507 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87508- sys_chdir("/old");
87509+ sys_chdir((const char __force_user *)"/old");
87510 return;
87511 }
87512
87513- sys_chdir("/");
87514+ sys_chdir((const char __force_user *)"/");
87515 ROOT_DEV = new_decode_dev(real_root_dev);
87516 mount_root();
87517
87518 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87519- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87520+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87521 if (!error)
87522 printk("okay\n");
87523 else {
87524- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87525+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87526 if (error == -ENOENT)
87527 printk("/initrd does not exist. Ignored.\n");
87528 else
87529 printk("failed\n");
87530 printk(KERN_NOTICE "Unmounting old root\n");
87531- sys_umount("/old", MNT_DETACH);
87532+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87533 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87534 if (fd < 0) {
87535 error = fd;
87536@@ -127,11 +127,11 @@ int __init initrd_load(void)
87537 * mounted in the normal path.
87538 */
87539 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87540- sys_unlink("/initrd.image");
87541+ sys_unlink((const char __force_user *)"/initrd.image");
87542 handle_initrd();
87543 return 1;
87544 }
87545 }
87546- sys_unlink("/initrd.image");
87547+ sys_unlink((const char __force_user *)"/initrd.image");
87548 return 0;
87549 }
87550diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87551index 8cb6db5..d729f50 100644
87552--- a/init/do_mounts_md.c
87553+++ b/init/do_mounts_md.c
87554@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87555 partitioned ? "_d" : "", minor,
87556 md_setup_args[ent].device_names);
87557
87558- fd = sys_open(name, 0, 0);
87559+ fd = sys_open((char __force_user *)name, 0, 0);
87560 if (fd < 0) {
87561 printk(KERN_ERR "md: open failed - cannot start "
87562 "array %s\n", name);
87563@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87564 * array without it
87565 */
87566 sys_close(fd);
87567- fd = sys_open(name, 0, 0);
87568+ fd = sys_open((char __force_user *)name, 0, 0);
87569 sys_ioctl(fd, BLKRRPART, 0);
87570 }
87571 sys_close(fd);
87572@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87573
87574 wait_for_device_probe();
87575
87576- fd = sys_open("/dev/md0", 0, 0);
87577+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87578 if (fd >= 0) {
87579 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87580 sys_close(fd);
87581diff --git a/init/init_task.c b/init/init_task.c
87582index ba0a7f36..2bcf1d5 100644
87583--- a/init/init_task.c
87584+++ b/init/init_task.c
87585@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87586 * Initial thread structure. Alignment of this is handled by a special
87587 * linker map entry.
87588 */
87589+#ifdef CONFIG_X86
87590+union thread_union init_thread_union __init_task_data;
87591+#else
87592 union thread_union init_thread_union __init_task_data =
87593 { INIT_THREAD_INFO(init_task) };
87594+#endif
87595diff --git a/init/initramfs.c b/init/initramfs.c
87596index ad1bd77..dca2c1b 100644
87597--- a/init/initramfs.c
87598+++ b/init/initramfs.c
87599@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87600
87601 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87602 while (count) {
87603- ssize_t rv = sys_write(fd, p, count);
87604+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87605
87606 if (rv < 0) {
87607 if (rv == -EINTR || rv == -EAGAIN)
87608@@ -107,7 +107,7 @@ static void __init free_hash(void)
87609 }
87610 }
87611
87612-static long __init do_utime(char *filename, time_t mtime)
87613+static long __init do_utime(char __force_user *filename, time_t mtime)
87614 {
87615 struct timespec t[2];
87616
87617@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87618 struct dir_entry *de, *tmp;
87619 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87620 list_del(&de->list);
87621- do_utime(de->name, de->mtime);
87622+ do_utime((char __force_user *)de->name, de->mtime);
87623 kfree(de->name);
87624 kfree(de);
87625 }
87626@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87627 if (nlink >= 2) {
87628 char *old = find_link(major, minor, ino, mode, collected);
87629 if (old)
87630- return (sys_link(old, collected) < 0) ? -1 : 1;
87631+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87632 }
87633 return 0;
87634 }
87635@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87636 {
87637 struct stat st;
87638
87639- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87640+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87641 if (S_ISDIR(st.st_mode))
87642- sys_rmdir(path);
87643+ sys_rmdir((char __force_user *)path);
87644 else
87645- sys_unlink(path);
87646+ sys_unlink((char __force_user *)path);
87647 }
87648 }
87649
87650@@ -338,7 +338,7 @@ static int __init do_name(void)
87651 int openflags = O_WRONLY|O_CREAT;
87652 if (ml != 1)
87653 openflags |= O_TRUNC;
87654- wfd = sys_open(collected, openflags, mode);
87655+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87656
87657 if (wfd >= 0) {
87658 sys_fchown(wfd, uid, gid);
87659@@ -350,17 +350,17 @@ static int __init do_name(void)
87660 }
87661 }
87662 } else if (S_ISDIR(mode)) {
87663- sys_mkdir(collected, mode);
87664- sys_chown(collected, uid, gid);
87665- sys_chmod(collected, mode);
87666+ sys_mkdir((char __force_user *)collected, mode);
87667+ sys_chown((char __force_user *)collected, uid, gid);
87668+ sys_chmod((char __force_user *)collected, mode);
87669 dir_add(collected, mtime);
87670 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87671 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87672 if (maybe_link() == 0) {
87673- sys_mknod(collected, mode, rdev);
87674- sys_chown(collected, uid, gid);
87675- sys_chmod(collected, mode);
87676- do_utime(collected, mtime);
87677+ sys_mknod((char __force_user *)collected, mode, rdev);
87678+ sys_chown((char __force_user *)collected, uid, gid);
87679+ sys_chmod((char __force_user *)collected, mode);
87680+ do_utime((char __force_user *)collected, mtime);
87681 }
87682 }
87683 return 0;
87684@@ -372,7 +372,7 @@ static int __init do_copy(void)
87685 if (xwrite(wfd, victim, body_len) != body_len)
87686 error("write error");
87687 sys_close(wfd);
87688- do_utime(vcollected, mtime);
87689+ do_utime((char __force_user *)vcollected, mtime);
87690 kfree(vcollected);
87691 eat(body_len);
87692 state = SkipIt;
87693@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87694 {
87695 collected[N_ALIGN(name_len) + body_len] = '\0';
87696 clean_path(collected, 0);
87697- sys_symlink(collected + N_ALIGN(name_len), collected);
87698- sys_lchown(collected, uid, gid);
87699- do_utime(collected, mtime);
87700+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87701+ sys_lchown((char __force_user *)collected, uid, gid);
87702+ do_utime((char __force_user *)collected, mtime);
87703 state = SkipIt;
87704 next_state = Reset;
87705 return 0;
87706diff --git a/init/main.c b/init/main.c
87707index 61b99376..1e346cb 100644
87708--- a/init/main.c
87709+++ b/init/main.c
87710@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87711 static inline void mark_rodata_ro(void) { }
87712 #endif
87713
87714+extern void grsecurity_init(void);
87715+
87716 /*
87717 * Debug helper: via this flag we know that we are in 'early bootup code'
87718 * where only the boot processor is running with IRQ disabled. This means
87719@@ -161,6 +163,85 @@ static int __init set_reset_devices(char *str)
87720
87721 __setup("reset_devices", set_reset_devices);
87722
87723+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87724+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87725+static int __init setup_grsec_proc_gid(char *str)
87726+{
87727+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87728+ return 1;
87729+}
87730+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87731+#endif
87732+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
87733+int grsec_enable_sysfs_restrict = 1;
87734+static int __init setup_grsec_sysfs_restrict(char *str)
87735+{
87736+ if (!simple_strtol(str, NULL, 0))
87737+ grsec_enable_sysfs_restrict = 0;
87738+ return 1;
87739+}
87740+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
87741+#endif
87742+
87743+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87744+unsigned long pax_user_shadow_base __read_only;
87745+EXPORT_SYMBOL(pax_user_shadow_base);
87746+extern char pax_enter_kernel_user[];
87747+extern char pax_exit_kernel_user[];
87748+#endif
87749+
87750+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87751+static int __init setup_pax_nouderef(char *str)
87752+{
87753+#ifdef CONFIG_X86_32
87754+ unsigned int cpu;
87755+ struct desc_struct *gdt;
87756+
87757+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87758+ gdt = get_cpu_gdt_table(cpu);
87759+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87760+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87761+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87762+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87763+ }
87764+ loadsegment(ds, __KERNEL_DS);
87765+ loadsegment(es, __KERNEL_DS);
87766+ loadsegment(ss, __KERNEL_DS);
87767+#else
87768+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87769+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87770+ clone_pgd_mask = ~(pgdval_t)0UL;
87771+ pax_user_shadow_base = 0UL;
87772+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87773+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87774+#endif
87775+
87776+ return 0;
87777+}
87778+early_param("pax_nouderef", setup_pax_nouderef);
87779+
87780+#ifdef CONFIG_X86_64
87781+static int __init setup_pax_weakuderef(char *str)
87782+{
87783+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87784+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87785+ return 1;
87786+}
87787+__setup("pax_weakuderef", setup_pax_weakuderef);
87788+#endif
87789+#endif
87790+
87791+#ifdef CONFIG_PAX_SOFTMODE
87792+int pax_softmode;
87793+
87794+static int __init setup_pax_softmode(char *str)
87795+{
87796+ get_option(&str, &pax_softmode);
87797+ return 1;
87798+}
87799+__setup("pax_softmode=", setup_pax_softmode);
87800+#endif
87801+
87802 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87803 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87804 static const char *panic_later, *panic_param;
87805@@ -735,7 +816,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87806 struct blacklist_entry *entry;
87807 char *fn_name;
87808
87809- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87810+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87811 if (!fn_name)
87812 return false;
87813
87814@@ -787,7 +868,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87815 {
87816 int count = preempt_count();
87817 int ret;
87818- char msgbuf[64];
87819+ const char *msg1 = "", *msg2 = "";
87820
87821 if (initcall_blacklisted(fn))
87822 return -EPERM;
87823@@ -797,18 +878,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87824 else
87825 ret = fn();
87826
87827- msgbuf[0] = 0;
87828-
87829 if (preempt_count() != count) {
87830- sprintf(msgbuf, "preemption imbalance ");
87831+ msg1 = " preemption imbalance";
87832 preempt_count_set(count);
87833 }
87834 if (irqs_disabled()) {
87835- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87836+ msg2 = " disabled interrupts";
87837 local_irq_enable();
87838 }
87839- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87840+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87841
87842+ add_latent_entropy();
87843 return ret;
87844 }
87845
87846@@ -914,8 +994,8 @@ static int run_init_process(const char *init_filename)
87847 {
87848 argv_init[0] = init_filename;
87849 return do_execve(getname_kernel(init_filename),
87850- (const char __user *const __user *)argv_init,
87851- (const char __user *const __user *)envp_init);
87852+ (const char __user *const __force_user *)argv_init,
87853+ (const char __user *const __force_user *)envp_init);
87854 }
87855
87856 static int try_to_run_init_process(const char *init_filename)
87857@@ -932,6 +1012,10 @@ static int try_to_run_init_process(const char *init_filename)
87858 return ret;
87859 }
87860
87861+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87862+extern int gr_init_ran;
87863+#endif
87864+
87865 static noinline void __init kernel_init_freeable(void);
87866
87867 static int __ref kernel_init(void *unused)
87868@@ -956,6 +1040,11 @@ static int __ref kernel_init(void *unused)
87869 ramdisk_execute_command, ret);
87870 }
87871
87872+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87873+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87874+ gr_init_ran = 1;
87875+#endif
87876+
87877 /*
87878 * We try each of these until one succeeds.
87879 *
87880@@ -1016,7 +1105,7 @@ static noinline void __init kernel_init_freeable(void)
87881 do_basic_setup();
87882
87883 /* Open the /dev/console on the rootfs, this should never fail */
87884- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87885+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87886 pr_err("Warning: unable to open an initial console.\n");
87887
87888 (void) sys_dup(0);
87889@@ -1029,11 +1118,13 @@ static noinline void __init kernel_init_freeable(void)
87890 if (!ramdisk_execute_command)
87891 ramdisk_execute_command = "/init";
87892
87893- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87894+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87895 ramdisk_execute_command = NULL;
87896 prepare_namespace();
87897 }
87898
87899+ grsecurity_init();
87900+
87901 /*
87902 * Ok, we have completed the initial bootup, and
87903 * we're essentially up and running. Get rid of the
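
Among the init/main.c changes, the do_one_initcall() hunk replaces a 64-byte on-stack msgbuf assembled with sprintf() and strlcat() by two constant string pointers, removing both a stack buffer and a silent-truncation hazard from the warning path. The shape of the refactor in isolation:

#include <stdio.h>

/* Before: format into a fixed stack buffer, then test buf[0].
 * After: select const strings and test whether either is non-empty. */
static void report(int preempt_imbalance, int irqs_were_disabled)
{
    const char *msg1 = "", *msg2 = "";

    if (preempt_imbalance)
        msg1 = " preemption imbalance";
    if (irqs_were_disabled)
        msg2 = " disabled interrupts";

    if (*msg1 || *msg2)
        fprintf(stderr, "initcall returned with%s%s\n", msg1, msg2);
}

int main(void)
{
    report(1, 1);
    return 0;
}
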
87904diff --git a/ipc/compat.c b/ipc/compat.c
87905index 9b3c85f..1c4d897 100644
87906--- a/ipc/compat.c
87907+++ b/ipc/compat.c
87908@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87909 COMPAT_SHMLBA);
87910 if (err < 0)
87911 return err;
87912- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87913+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87914 }
87915 case SHMDT:
87916 return sys_shmdt(compat_ptr(ptr));
87917diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
87918index 8ad93c2..efd80f8 100644
87919--- a/ipc/ipc_sysctl.c
87920+++ b/ipc/ipc_sysctl.c
87921@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
87922 static int proc_ipc_dointvec(struct ctl_table *table, int write,
87923 void __user *buffer, size_t *lenp, loff_t *ppos)
87924 {
87925- struct ctl_table ipc_table;
87926+ ctl_table_no_const ipc_table;
87927
87928 memcpy(&ipc_table, table, sizeof(ipc_table));
87929 ipc_table.data = get_ipc(table);
87930@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
87931 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
87932 void __user *buffer, size_t *lenp, loff_t *ppos)
87933 {
87934- struct ctl_table ipc_table;
87935+ ctl_table_no_const ipc_table;
87936
87937 memcpy(&ipc_table, table, sizeof(ipc_table));
87938 ipc_table.data = get_ipc(table);
87939@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
87940 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87941 void __user *buffer, size_t *lenp, loff_t *ppos)
87942 {
87943- struct ctl_table ipc_table;
87944+ ctl_table_no_const ipc_table;
87945 memcpy(&ipc_table, table, sizeof(ipc_table));
87946 ipc_table.data = get_ipc(table);
87947
87948@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87949 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
87950 void __user *buffer, size_t *lenp, loff_t *ppos)
87951 {
87952- struct ctl_table ipc_table;
87953+ ctl_table_no_const ipc_table;
87954 int dummy = 0;
87955
87956 memcpy(&ipc_table, table, sizeof(ipc_table));
87957diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
87958index 68d4e95..1477ded 100644
87959--- a/ipc/mq_sysctl.c
87960+++ b/ipc/mq_sysctl.c
87961@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
87962 static int proc_mq_dointvec(struct ctl_table *table, int write,
87963 void __user *buffer, size_t *lenp, loff_t *ppos)
87964 {
87965- struct ctl_table mq_table;
87966+ ctl_table_no_const mq_table;
87967 memcpy(&mq_table, table, sizeof(mq_table));
87968 mq_table.data = get_mq(table);
87969
87970@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
87971 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
87972 void __user *buffer, size_t *lenp, loff_t *ppos)
87973 {
87974- struct ctl_table mq_table;
87975+ ctl_table_no_const mq_table;
87976 memcpy(&mq_table, table, sizeof(mq_table));
87977 mq_table.data = get_mq(table);
87978
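
ctl_table_no_const is the escape hatch paired with the constify plugin: struct ctl_table as such is forced const, so handlers that genuinely need a writable copy (here, to point .data at the per-namespace value) declare the on-stack copy through a non-constified typedef. The pattern in plain C, names hypothetical:

#include <stdio.h>
#include <string.h>

struct ctl_sketch {
    const char *procname;
    void       *data;
};

/* With the constify plugin, 'struct ctl_sketch' would be read-only;
 * a separate typedef opts the on-stack copy out of that treatment. */
typedef struct ctl_sketch ctl_sketch_no_const;

static int msg_max = 10;          /* per-namespace value, say */

static const struct ctl_sketch template = { "msg_max", NULL };

static void handle(void)
{
    ctl_sketch_no_const local;

    memcpy(&local, &template, sizeof(local));
    local.data = &msg_max;        /* writable copy gets fixed up */
    printf("%s -> %d\n", local.procname, *(int *)local.data);
}

int main(void)
{
    handle();
    return 0;
}
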
87979diff --git a/ipc/mqueue.c b/ipc/mqueue.c
87980index 7635a1c..7432cb6 100644
87981--- a/ipc/mqueue.c
87982+++ b/ipc/mqueue.c
87983@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
87984 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
87985 info->attr.mq_msgsize);
87986
87987+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
87988 spin_lock(&mq_lock);
87989 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
87990 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
87991diff --git a/ipc/shm.c b/ipc/shm.c
87992index 19633b4..d454904 100644
87993--- a/ipc/shm.c
87994+++ b/ipc/shm.c
87995@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
87996 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
87997 #endif
87998
87999+#ifdef CONFIG_GRKERNSEC
88000+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88001+ const u64 shm_createtime, const kuid_t cuid,
88002+ const int shmid);
88003+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88004+ const u64 shm_createtime);
88005+#endif
88006+
88007 void shm_init_ns(struct ipc_namespace *ns)
88008 {
88009 ns->shm_ctlmax = SHMMAX;
88010@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88011 shp->shm_lprid = 0;
88012 shp->shm_atim = shp->shm_dtim = 0;
88013 shp->shm_ctim = get_seconds();
88014+#ifdef CONFIG_GRKERNSEC
88015+ shp->shm_createtime = ktime_get_ns();
88016+#endif
88017 shp->shm_segsz = size;
88018 shp->shm_nattch = 0;
88019 shp->shm_file = file;
88020@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88021 f_mode = FMODE_READ | FMODE_WRITE;
88022 }
88023 if (shmflg & SHM_EXEC) {
88024+
88025+#ifdef CONFIG_PAX_MPROTECT
88026+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88027+ goto out;
88028+#endif
88029+
88030 prot |= PROT_EXEC;
88031 acc_mode |= S_IXUGO;
88032 }
88033@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88034 if (err)
88035 goto out_unlock;
88036
88037+#ifdef CONFIG_GRKERNSEC
88038+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88039+ shp->shm_perm.cuid, shmid) ||
88040+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88041+ err = -EACCES;
88042+ goto out_unlock;
88043+ }
88044+#endif
88045+
88046 ipc_lock_object(&shp->shm_perm);
88047
88048 /* check if shm_destroy() is tearing down shp */
88049@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88050 path = shp->shm_file->f_path;
88051 path_get(&path);
88052 shp->shm_nattch++;
88053+#ifdef CONFIG_GRKERNSEC
88054+ shp->shm_lapid = current->pid;
88055+#endif
88056 size = i_size_read(path.dentry->d_inode);
88057 ipc_unlock_object(&shp->shm_perm);
88058 rcu_read_unlock();
88059diff --git a/ipc/util.c b/ipc/util.c
88060index 106bed0..f851429 100644
88061--- a/ipc/util.c
88062+++ b/ipc/util.c
88063@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88064 int (*show)(struct seq_file *, void *);
88065 };
88066
88067+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88068+
88069 /**
88070 * ipc_init - initialise ipc subsystem
88071 *
88072@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88073 granted_mode >>= 6;
88074 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88075 granted_mode >>= 3;
88076+
88077+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88078+ return -1;
88079+
88080 /* is there some bit set in requested_mode but not in granted_mode? */
88081 if ((requested_mode & ~granted_mode & 0007) &&
88082 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88083diff --git a/kernel/audit.c b/kernel/audit.c
88084index 72ab759..757deba 100644
88085--- a/kernel/audit.c
88086+++ b/kernel/audit.c
88087@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88088 3) suppressed due to audit_rate_limit
88089 4) suppressed due to audit_backlog_limit
88090 */
88091-static atomic_t audit_lost = ATOMIC_INIT(0);
88092+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88093
88094 /* The netlink socket. */
88095 static struct sock *audit_sock;
88096@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88097 unsigned long now;
88098 int print;
88099
88100- atomic_inc(&audit_lost);
88101+ atomic_inc_unchecked(&audit_lost);
88102
88103 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88104
88105@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88106 if (print) {
88107 if (printk_ratelimit())
88108 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88109- atomic_read(&audit_lost),
88110+ atomic_read_unchecked(&audit_lost),
88111 audit_rate_limit,
88112 audit_backlog_limit);
88113 audit_panic(message);
88114@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88115 s.pid = audit_pid;
88116 s.rate_limit = audit_rate_limit;
88117 s.backlog_limit = audit_backlog_limit;
88118- s.lost = atomic_read(&audit_lost);
88119+ s.lost = atomic_read_unchecked(&audit_lost);
88120 s.backlog = skb_queue_len(&audit_skb_queue);
88121 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88122 s.backlog_wait_time = audit_backlog_wait_time;
88123diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88124index 072566d..1190489 100644
88125--- a/kernel/auditsc.c
88126+++ b/kernel/auditsc.c
88127@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88128 }
88129
88130 /* global counter which is incremented every time something logs in */
88131-static atomic_t session_id = ATOMIC_INIT(0);
88132+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88133
88134 static int audit_set_loginuid_perm(kuid_t loginuid)
88135 {
88136@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88137
88138 /* are we setting or clearing? */
88139 if (uid_valid(loginuid))
88140- sessionid = (unsigned int)atomic_inc_return(&session_id);
88141+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88142
88143 task->sessionid = sessionid;
88144 task->loginuid = loginuid;
88145diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88146index a64e7a2..2e69448 100644
88147--- a/kernel/bpf/core.c
88148+++ b/kernel/bpf/core.c
88149@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88150 * random section of illegal instructions.
88151 */
88152 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88153- hdr = module_alloc(size);
88154+ hdr = module_alloc_exec(size);
88155 if (hdr == NULL)
88156 return NULL;
88157
88158 /* Fill space with illegal/arch-dep instructions. */
88159 bpf_fill_ill_insns(hdr, size);
88160
88161+ pax_open_kernel();
88162 hdr->pages = size / PAGE_SIZE;
88163+ pax_close_kernel();
88164+
88165 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88166 PAGE_SIZE - sizeof(*hdr));
88167 start = (prandom_u32() % hole) & ~(alignment - 1);
88168@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88169
88170 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88171 {
88172- module_memfree(hdr);
88173+ module_memfree_exec(hdr);
88174 }
88175 #endif /* CONFIG_BPF_JIT */
88176
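
Two KERNEXEC-related adjustments land in the BPF JIT: executable JIT pages come from module_alloc_exec()/module_memfree_exec() instead of the generic module area, and the lone write to hdr->pages is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift write protection on otherwise read-only kernel mappings (on x86 by toggling CR0.WP). A conceptual userspace analogue with mprotect(2):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Userspace stand-in for pax_open_kernel()/pax_close_kernel():
 * flip a read-only page writable just long enough for one store. */
int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    unsigned char *p = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    p[0] = 1;
    mprotect(p, (size_t)pagesz, PROT_READ);   /* normal state: read-only */

    mprotect(p, (size_t)pagesz, PROT_READ | PROT_WRITE);  /* "open"  */
    p[0] = 42;                                            /* the write */
    mprotect(p, (size_t)pagesz, PROT_READ);               /* "close" */

    printf("%u\n", p[0]);
    munmap(p, (size_t)pagesz);
    return 0;
}
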
88177diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88178index 536edc2..d28c85d 100644
88179--- a/kernel/bpf/syscall.c
88180+++ b/kernel/bpf/syscall.c
88181@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88182 int err;
88183
88184 /* the syscall is limited to root temporarily. This restriction will be
88185- * lifted when security audit is clean. Note that eBPF+tracing must have
88186- * this restriction, since it may pass kernel data to user space
88187+ * lifted by upstream when a half-assed security audit is clean. Note
88188+ * that eBPF+tracing must have this restriction, since it may pass
88189+ * kernel data to user space
88190 */
88191 if (!capable(CAP_SYS_ADMIN))
88192 return -EPERM;
88193+#ifdef CONFIG_GRKERNSEC
88194+ return -EPERM;
88195+#endif
88196
88197 if (!access_ok(VERIFY_READ, uattr, 1))
88198 return -EFAULT;
88199diff --git a/kernel/capability.c b/kernel/capability.c
88200index 989f5bf..d317ca0 100644
88201--- a/kernel/capability.c
88202+++ b/kernel/capability.c
88203@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88204 * before modification is attempted and the application
88205 * fails.
88206 */
88207+ if (tocopy > ARRAY_SIZE(kdata))
88208+ return -EFAULT;
88209+
88210 if (copy_to_user(dataptr, kdata, tocopy
88211 * sizeof(struct __user_cap_data_struct))) {
88212 return -EFAULT;
88213@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88214 int ret;
88215
88216 rcu_read_lock();
88217- ret = security_capable(__task_cred(t), ns, cap);
88218+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88219+ gr_task_is_capable(t, __task_cred(t), cap);
88220 rcu_read_unlock();
88221
88222- return (ret == 0);
88223+ return ret;
88224 }
88225
88226 /**
88227@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88228 int ret;
88229
88230 rcu_read_lock();
88231- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88232+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88233 rcu_read_unlock();
88234
88235- return (ret == 0);
88236+ return ret;
88237 }
88238
88239 /**
88240@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88241 BUG();
88242 }
88243
88244- if (security_capable(current_cred(), ns, cap) == 0) {
88245+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88246 current->flags |= PF_SUPERPRIV;
88247 return true;
88248 }
88249@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88250 }
88251 EXPORT_SYMBOL(ns_capable);
88252
88253+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88254+{
88255+ if (unlikely(!cap_valid(cap))) {
88256+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88257+ BUG();
88258+ }
88259+
88260+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88261+ current->flags |= PF_SUPERPRIV;
88262+ return true;
88263+ }
88264+ return false;
88265+}
88266+EXPORT_SYMBOL(ns_capable_nolog);
88267+
88268 /**
88269 * file_ns_capable - Determine if the file's opener had a capability in effect
88270 * @file: The file we want to check
88271@@ -427,6 +446,12 @@ bool capable(int cap)
88272 }
88273 EXPORT_SYMBOL(capable);
88274
88275+bool capable_nolog(int cap)
88276+{
88277+ return ns_capable_nolog(&init_user_ns, cap);
88278+}
88279+EXPORT_SYMBOL(capable_nolog);
88280+
88281 /**
88282 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88283 * @inode: The inode in question
88284@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88285 kgid_has_mapping(ns, inode->i_gid);
88286 }
88287 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88288+
88289+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88290+{
88291+ struct user_namespace *ns = current_user_ns();
88292+
88293+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88294+ kgid_has_mapping(ns, inode->i_gid);
88295+}
88296+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
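The pattern across these capability hunks is a conjunction: a capability is granted only when the LSM stack returns 0 and grsecurity's RBAC hook agrees, with _nolog variants that skip audit records for speculative checks (the kind /proc readers trigger constantly). A sketch of the combined decision (illustrative; gr_is_capable() is supplied elsewhere in this patch):

	static inline bool cap_granted(const struct cred *cred,
				       struct user_namespace *ns, int cap)
	{
		return security_capable(cred, ns, cap) == 0 &&	/* LSM verdict */
		       gr_is_capable(cap);			/* grsec RBAC verdict */
	}
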
88297diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88298index 04cfe8a..adadcc0 100644
88299--- a/kernel/cgroup.c
88300+++ b/kernel/cgroup.c
88301@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88302 if (!pathbuf || !agentbuf)
88303 goto out;
88304
88305+ if (agentbuf[0] == '\0')
88306+ goto out;
88307+
88308 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88309 if (!path)
88310 goto out;
88311@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88312 struct task_struct *task;
88313 int count = 0;
88314
88315- seq_printf(seq, "css_set %p\n", cset);
88316+ seq_printf(seq, "css_set %pK\n", cset);
88317
88318 list_for_each_entry(task, &cset->tasks, cg_list) {
88319 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
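Two independent fixes here: cgroup_release_agent() now bails out before spawning a usermode helper with an empty path, and the css_set address is printed with %pK rather than %p so it can be masked for unprivileged readers. A sketch of the %p/%pK difference (illustrative; whether %pK actually zeroes the value depends on kptr_restrict, which GRKERNSEC_HIDESYM tightens):

	static void show_cset(struct seq_file *seq, struct css_set *cset)
	{
		seq_printf(seq, "css_set %p\n", cset);	/* leaks the kernel address */
		seq_printf(seq, "css_set %pK\n", cset);	/* zeroed for unprivileged readers */
	}
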
88320diff --git a/kernel/compat.c b/kernel/compat.c
88321index ebb3c36..1df606e 100644
88322--- a/kernel/compat.c
88323+++ b/kernel/compat.c
88324@@ -13,6 +13,7 @@
88325
88326 #include <linux/linkage.h>
88327 #include <linux/compat.h>
88328+#include <linux/module.h>
88329 #include <linux/errno.h>
88330 #include <linux/time.h>
88331 #include <linux/signal.h>
88332@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88333 mm_segment_t oldfs;
88334 long ret;
88335
88336- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88337+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88338 oldfs = get_fs();
88339 set_fs(KERNEL_DS);
88340 ret = hrtimer_nanosleep_restart(restart);
88341@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88342 oldfs = get_fs();
88343 set_fs(KERNEL_DS);
88344 ret = hrtimer_nanosleep(&tu,
88345- rmtp ? (struct timespec __user *)&rmt : NULL,
88346+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88347 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88348 set_fs(oldfs);
88349
88350@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88351 mm_segment_t old_fs = get_fs();
88352
88353 set_fs(KERNEL_DS);
88354- ret = sys_sigpending((old_sigset_t __user *) &s);
88355+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88356 set_fs(old_fs);
88357 if (ret == 0)
88358 ret = put_user(s, set);
88359@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88360 mm_segment_t old_fs = get_fs();
88361
88362 set_fs(KERNEL_DS);
88363- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88364+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88365 set_fs(old_fs);
88366
88367 if (!ret) {
88368@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88369 set_fs (KERNEL_DS);
88370 ret = sys_wait4(pid,
88371 (stat_addr ?
88372- (unsigned int __user *) &status : NULL),
88373- options, (struct rusage __user *) &r);
88374+ (unsigned int __force_user *) &status : NULL),
88375+ options, (struct rusage __force_user *) &r);
88376 set_fs (old_fs);
88377
88378 if (ret > 0) {
88379@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88380 memset(&info, 0, sizeof(info));
88381
88382 set_fs(KERNEL_DS);
88383- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88384- uru ? (struct rusage __user *)&ru : NULL);
88385+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88386+ uru ? (struct rusage __force_user *)&ru : NULL);
88387 set_fs(old_fs);
88388
88389 if ((ret < 0) || (info.si_signo == 0))
88390@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88391 oldfs = get_fs();
88392 set_fs(KERNEL_DS);
88393 err = sys_timer_settime(timer_id, flags,
88394- (struct itimerspec __user *) &newts,
88395- (struct itimerspec __user *) &oldts);
88396+ (struct itimerspec __force_user *) &newts,
88397+ (struct itimerspec __force_user *) &oldts);
88398 set_fs(oldfs);
88399 if (!err && old && put_compat_itimerspec(old, &oldts))
88400 return -EFAULT;
88401@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88402 oldfs = get_fs();
88403 set_fs(KERNEL_DS);
88404 err = sys_timer_gettime(timer_id,
88405- (struct itimerspec __user *) &ts);
88406+ (struct itimerspec __force_user *) &ts);
88407 set_fs(oldfs);
88408 if (!err && put_compat_itimerspec(setting, &ts))
88409 return -EFAULT;
88410@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88411 oldfs = get_fs();
88412 set_fs(KERNEL_DS);
88413 err = sys_clock_settime(which_clock,
88414- (struct timespec __user *) &ts);
88415+ (struct timespec __force_user *) &ts);
88416 set_fs(oldfs);
88417 return err;
88418 }
88419@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88420 oldfs = get_fs();
88421 set_fs(KERNEL_DS);
88422 err = sys_clock_gettime(which_clock,
88423- (struct timespec __user *) &ts);
88424+ (struct timespec __force_user *) &ts);
88425 set_fs(oldfs);
88426 if (!err && compat_put_timespec(&ts, tp))
88427 return -EFAULT;
88428@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88429
88430 oldfs = get_fs();
88431 set_fs(KERNEL_DS);
88432- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88433+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88434 set_fs(oldfs);
88435
88436 err = compat_put_timex(utp, &txc);
88437@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88438 oldfs = get_fs();
88439 set_fs(KERNEL_DS);
88440 err = sys_clock_getres(which_clock,
88441- (struct timespec __user *) &ts);
88442+ (struct timespec __force_user *) &ts);
88443 set_fs(oldfs);
88444 if (!err && tp && compat_put_timespec(&ts, tp))
88445 return -EFAULT;
88446@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88447 struct timespec tu;
88448 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88449
88450- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88451+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88452 oldfs = get_fs();
88453 set_fs(KERNEL_DS);
88454 err = clock_nanosleep_restart(restart);
88455@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88456 oldfs = get_fs();
88457 set_fs(KERNEL_DS);
88458 err = sys_clock_nanosleep(which_clock, flags,
88459- (struct timespec __user *) &in,
88460- (struct timespec __user *) &out);
88461+ (struct timespec __force_user *) &in,
88462+ (struct timespec __force_user *) &out);
88463 set_fs(oldfs);
88464
88465 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88466@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88467 mm_segment_t old_fs = get_fs();
88468
88469 set_fs(KERNEL_DS);
88470- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88471+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88472 set_fs(old_fs);
88473 if (compat_put_timespec(&t, interval))
88474 return -EFAULT;
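Every change in kernel/compat.c is the same annotation: these wrappers deliberately pass kernel-stack buffers to syscalls that expect __user pointers, under a temporary set_fs(KERNEL_DS). __force_user is a grsecurity/PaX sparse annotation marking that address-space cast as intentional, so the checker still catches accidental casts elsewhere. The idiom, as a self-contained sketch (illustrative, not part of the patch):

	static long kernel_clock_gettime(clockid_t clk, struct timespec *ts)
	{
		mm_segment_t oldfs = get_fs();
		long err;

		set_fs(KERNEL_DS);	/* "user" accesses may now hit kernel memory */
		err = sys_clock_gettime(clk, (struct timespec __force_user *)ts);
		set_fs(oldfs);		/* always restore the old limit */
		return err;
	}
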
88475diff --git a/kernel/configs.c b/kernel/configs.c
88476index c18b1f1..b9a0132 100644
88477--- a/kernel/configs.c
88478+++ b/kernel/configs.c
88479@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88480 struct proc_dir_entry *entry;
88481
88482 /* create the current config file */
88483+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88484+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88485+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88486+ &ikconfig_file_ops);
88487+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88488+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88489+ &ikconfig_file_ops);
88490+#endif
88491+#else
88492 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88493 &ikconfig_file_ops);
88494+#endif
88495+
88496 if (!entry)
88497 return -ENOMEM;
88498
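The #ifdef ladder just selects the permission bits for /proc/config.gz; summarized (this mirrors the hunk directly):

	/* mode chosen for /proc/config.gz:
	 *   GRKERNSEC_PROC_USER or GRKERNSEC_HIDESYM: S_IRUSR         (0400, root only)
	 *   GRKERNSEC_PROC_USERGROUP:                 S_IRUSR|S_IRGRP (0440, root + proc group)
	 *   otherwise:                                S_IRUGO         (0444, world readable)
	 */
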
88499diff --git a/kernel/cred.c b/kernel/cred.c
88500index e0573a4..26c0fd3 100644
88501--- a/kernel/cred.c
88502+++ b/kernel/cred.c
88503@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88504 validate_creds(cred);
88505 alter_cred_subscribers(cred, -1);
88506 put_cred(cred);
88507+
88508+#ifdef CONFIG_GRKERNSEC_SETXID
88509+ cred = (struct cred *) tsk->delayed_cred;
88510+ if (cred != NULL) {
88511+ tsk->delayed_cred = NULL;
88512+ validate_creds(cred);
88513+ alter_cred_subscribers(cred, -1);
88514+ put_cred(cred);
88515+ }
88516+#endif
88517 }
88518
88519 /**
88520@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88521 * Always returns 0 thus allowing this function to be tail-called at the end
88522 * of, say, sys_setgid().
88523 */
88524-int commit_creds(struct cred *new)
88525+static int __commit_creds(struct cred *new)
88526 {
88527 struct task_struct *task = current;
88528 const struct cred *old = task->real_cred;
88529@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88530
88531 get_cred(new); /* we will require a ref for the subj creds too */
88532
88533+ gr_set_role_label(task, new->uid, new->gid);
88534+
88535 /* dumpability changes */
88536 if (!uid_eq(old->euid, new->euid) ||
88537 !gid_eq(old->egid, new->egid) ||
88538@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88539 put_cred(old);
88540 return 0;
88541 }
88542+#ifdef CONFIG_GRKERNSEC_SETXID
88543+extern int set_user(struct cred *new);
88544+
88545+void gr_delayed_cred_worker(void)
88546+{
88547+ const struct cred *new = current->delayed_cred;
88548+ struct cred *ncred;
88549+
88550+ current->delayed_cred = NULL;
88551+
88552+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88553+ // from doing get_cred on it when queueing this
88554+ put_cred(new);
88555+ return;
88556+ } else if (new == NULL)
88557+ return;
88558+
88559+ ncred = prepare_creds();
88560+ if (!ncred)
88561+ goto die;
88562+ // uids
88563+ ncred->uid = new->uid;
88564+ ncred->euid = new->euid;
88565+ ncred->suid = new->suid;
88566+ ncred->fsuid = new->fsuid;
88567+ // gids
88568+ ncred->gid = new->gid;
88569+ ncred->egid = new->egid;
88570+ ncred->sgid = new->sgid;
88571+ ncred->fsgid = new->fsgid;
88572+ // groups
88573+ set_groups(ncred, new->group_info);
88574+ // caps
88575+ ncred->securebits = new->securebits;
88576+ ncred->cap_inheritable = new->cap_inheritable;
88577+ ncred->cap_permitted = new->cap_permitted;
88578+ ncred->cap_effective = new->cap_effective;
88579+ ncred->cap_bset = new->cap_bset;
88580+
88581+ if (set_user(ncred)) {
88582+ abort_creds(ncred);
88583+ goto die;
88584+ }
88585+
88586+ // from doing get_cred on it when queueing this
88587+ put_cred(new);
88588+
88589+ __commit_creds(ncred);
88590+ return;
88591+die:
88592+ // from doing get_cred on it when queueing this
88593+ put_cred(new);
88594+ do_group_exit(SIGKILL);
88595+}
88596+#endif
88597+
88598+int commit_creds(struct cred *new)
88599+{
88600+#ifdef CONFIG_GRKERNSEC_SETXID
88601+ int ret;
88602+ int schedule_it = 0;
88603+ struct task_struct *t;
88604+ unsigned oldsecurebits = current_cred()->securebits;
88605+
88606+ /* we won't get called with tasklist_lock held for writing
88607+ and interrupts disabled as the cred struct in that case is
88608+ init_cred
88609+ */
88610+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88611+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88612+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88613+ schedule_it = 1;
88614+ }
88615+ ret = __commit_creds(new);
88616+ if (schedule_it) {
88617+ rcu_read_lock();
88618+ read_lock(&tasklist_lock);
88619+ for (t = next_thread(current); t != current;
88620+ t = next_thread(t)) {
88621+ /* we'll check if the thread has uid 0 in
88622+ * the delayed worker routine
88623+ */
88624+ if (task_securebits(t) == oldsecurebits &&
88625+ t->delayed_cred == NULL) {
88626+ t->delayed_cred = get_cred(new);
88627+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88628+ set_tsk_need_resched(t);
88629+ }
88630+ }
88631+ read_unlock(&tasklist_lock);
88632+ rcu_read_unlock();
88633+ }
88634+
88635+ return ret;
88636+#else
88637+ return __commit_creds(new);
88638+#endif
88639+}
88640+
88641 EXPORT_SYMBOL(commit_creds);
88642
88643 /**
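The GRKERNSEC_SETXID machinery above addresses multithreaded privilege drops: POSIX wants setuid() to affect the whole process, but the kernel call only changes the calling thread, so glibc normally broadcasts a signal to every thread to apply it. If userspace skips that step, sibling threads keep root. Here commit_creds() stashes the new credentials on every other thread (delayed_cred), flags it with TIF_GRSEC_SETXID, and each thread applies them itself via gr_delayed_cred_worker(). A sketch of the consumer side (the actual call sites are in the arch syscall entry/exit paths elsewhere in this patch):

	/* executed by each flagged thread at its next kernel boundary */
	if (test_thread_flag(TIF_GRSEC_SETXID))
		gr_delayed_cred_worker();	/* rebuilds and commits the dropped creds */
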
88644diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88645index ac5c0f9..4b1c6c2 100644
88646--- a/kernel/debug/debug_core.c
88647+++ b/kernel/debug/debug_core.c
88648@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88649 */
88650 static atomic_t masters_in_kgdb;
88651 static atomic_t slaves_in_kgdb;
88652-static atomic_t kgdb_break_tasklet_var;
88653+static atomic_unchecked_t kgdb_break_tasklet_var;
88654 atomic_t kgdb_setting_breakpoint;
88655
88656 struct task_struct *kgdb_usethread;
88657@@ -137,7 +137,7 @@ int kgdb_single_step;
88658 static pid_t kgdb_sstep_pid;
88659
88660 /* to keep track of the CPU which is doing the single stepping*/
88661-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88662+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88663
88664 /*
88665 * If you are debugging a problem where roundup (the collection of
88666@@ -552,7 +552,7 @@ return_normal:
88667 * kernel will only try for the value of sstep_tries before
88668 * giving up and continuing on.
88669 */
88670- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88671+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88672 (kgdb_info[cpu].task &&
88673 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88674 atomic_set(&kgdb_active, -1);
88675@@ -654,8 +654,8 @@ cpu_master_loop:
88676 }
88677
88678 kgdb_restore:
88679- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88680- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88681+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88682+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88683 if (kgdb_info[sstep_cpu].task)
88684 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88685 else
88686@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88687 static void kgdb_tasklet_bpt(unsigned long ing)
88688 {
88689 kgdb_breakpoint();
88690- atomic_set(&kgdb_break_tasklet_var, 0);
88691+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88692 }
88693
88694 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88695
88696 void kgdb_schedule_breakpoint(void)
88697 {
88698- if (atomic_read(&kgdb_break_tasklet_var) ||
88699+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88700 atomic_read(&kgdb_active) != -1 ||
88701 atomic_read(&kgdb_setting_breakpoint))
88702 return;
88703- atomic_inc(&kgdb_break_tasklet_var);
88704+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88705 tasklet_schedule(&kgdb_tasklet_breakpoint);
88706 }
88707 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
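These atomic_t to atomic_unchecked_t conversions are PaX REFCOUNT opt-outs: under that feature, ordinary atomic_t operations trap on overflow, on the assumption that they guard object lifetimes. Counters that may legitimately wrap are moved to the unchecked variants. The split, as a sketch (illustrative):

	static atomic_t           refs  = ATOMIC_INIT(1);	/* refcount: overflow traps  */
	static atomic_unchecked_t stats = ATOMIC_INIT(0);	/* statistic: overflow wraps */

	static void bump(void)
	{
		atomic_inc(&refs);		/* checked under PAX_REFCOUNT */
		atomic_inc_unchecked(&stats);	/* plain wraparound is acceptable */
	}
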
88708diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88709index 60f6bb8..104bb07 100644
88710--- a/kernel/debug/kdb/kdb_main.c
88711+++ b/kernel/debug/kdb/kdb_main.c
88712@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88713 continue;
88714
88715 kdb_printf("%-20s%8u 0x%p ", mod->name,
88716- mod->core_size, (void *)mod);
88717+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88718 #ifdef CONFIG_MODULE_UNLOAD
88719 kdb_printf("%4d ", module_refcount(mod));
88720 #endif
88721@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88722 kdb_printf(" (Loading)");
88723 else
88724 kdb_printf(" (Live)");
88725- kdb_printf(" 0x%p", mod->module_core);
88726+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88727
88728 #ifdef CONFIG_MODULE_UNLOAD
88729 {
88730diff --git a/kernel/events/core.c b/kernel/events/core.c
88731index 19efcf133..7c05c93 100644
88732--- a/kernel/events/core.c
88733+++ b/kernel/events/core.c
88734@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88735 * 0 - disallow raw tracepoint access for unpriv
88736 * 1 - disallow cpu events for unpriv
88737 * 2 - disallow kernel profiling for unpriv
88738+ * 3 - disallow all unpriv perf event use
88739 */
88740-int sysctl_perf_event_paranoid __read_mostly = 1;
88741+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88742+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88743+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88744+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88745+#else
88746+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88747+#endif
88748
88749 /* Minimum for 512 kiB + 1 user control page */
88750 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88751@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88752
88753 tmp *= sysctl_perf_cpu_time_max_percent;
88754 do_div(tmp, 100);
88755- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88756+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88757 }
88758
88759 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88760@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88761 }
88762 }
88763
88764-static atomic64_t perf_event_id;
88765+static atomic64_unchecked_t perf_event_id;
88766
88767 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88768 enum event_type_t event_type);
88769@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88770
88771 static inline u64 perf_event_count(struct perf_event *event)
88772 {
88773- return local64_read(&event->count) + atomic64_read(&event->child_count);
88774+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88775 }
88776
88777 static u64 perf_event_read(struct perf_event *event)
88778@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88779 mutex_lock(&event->child_mutex);
88780 total += perf_event_read(event);
88781 *enabled += event->total_time_enabled +
88782- atomic64_read(&event->child_total_time_enabled);
88783+ atomic64_read_unchecked(&event->child_total_time_enabled);
88784 *running += event->total_time_running +
88785- atomic64_read(&event->child_total_time_running);
88786+ atomic64_read_unchecked(&event->child_total_time_running);
88787
88788 list_for_each_entry(child, &event->child_list, child_list) {
88789 total += perf_event_read(child);
88790@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88791 userpg->offset -= local64_read(&event->hw.prev_count);
88792
88793 userpg->time_enabled = enabled +
88794- atomic64_read(&event->child_total_time_enabled);
88795+ atomic64_read_unchecked(&event->child_total_time_enabled);
88796
88797 userpg->time_running = running +
88798- atomic64_read(&event->child_total_time_running);
88799+ atomic64_read_unchecked(&event->child_total_time_running);
88800
88801 arch_perf_update_userpage(userpg, now);
88802
88803@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88804
88805 /* Data. */
88806 sp = perf_user_stack_pointer(regs);
88807- rem = __output_copy_user(handle, (void *) sp, dump_size);
88808+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88809 dyn_size = dump_size - rem;
88810
88811 perf_output_skip(handle, rem);
88812@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88813 values[n++] = perf_event_count(event);
88814 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88815 values[n++] = enabled +
88816- atomic64_read(&event->child_total_time_enabled);
88817+ atomic64_read_unchecked(&event->child_total_time_enabled);
88818 }
88819 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88820 values[n++] = running +
88821- atomic64_read(&event->child_total_time_running);
88822+ atomic64_read_unchecked(&event->child_total_time_running);
88823 }
88824 if (read_format & PERF_FORMAT_ID)
88825 values[n++] = primary_event_id(event);
88826@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88827 event->parent = parent_event;
88828
88829 event->ns = get_pid_ns(task_active_pid_ns(current));
88830- event->id = atomic64_inc_return(&perf_event_id);
88831+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88832
88833 event->state = PERF_EVENT_STATE_INACTIVE;
88834
88835@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
88836 if (flags & ~PERF_FLAG_ALL)
88837 return -EINVAL;
88838
88839+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88840+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88841+ return -EACCES;
88842+#endif
88843+
88844 err = perf_copy_attr(attr_uptr, &attr);
88845 if (err)
88846 return err;
88847@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
88848 /*
88849 * Add back the child's count to the parent's count:
88850 */
88851- atomic64_add(child_val, &parent_event->child_count);
88852- atomic64_add(child_event->total_time_enabled,
88853+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88854+ atomic64_add_unchecked(child_event->total_time_enabled,
88855 &parent_event->child_total_time_enabled);
88856- atomic64_add(child_event->total_time_running,
88857+ atomic64_add_unchecked(child_event->total_time_running,
88858 &parent_event->child_total_time_running);
88859
88860 /*
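Besides the unchecked-atomic conversions, this file gains perf_event_paranoid level 3: all unprivileged perf_event_open() use is refused, and the sysctl (renamed sysctl_perf_event_legitimately_concerned by the patch) defaults to 3 under GRKERNSEC_PERF_HARDEN. The helper used in the syscall hunk is defined elsewhere in this patch, roughly as (sketch):

	static inline bool perf_paranoid_any(void)
	{
		/* level 3: no unprivileged perf at all */
		return sysctl_perf_event_legitimately_concerned > 2;
	}
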
88861diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88862index 569b2187..19940d9 100644
88863--- a/kernel/events/internal.h
88864+++ b/kernel/events/internal.h
88865@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88866 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88867 }
88868
88869-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88870+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88871 static inline unsigned long \
88872 func_name(struct perf_output_handle *handle, \
88873- const void *buf, unsigned long len) \
88874+ const void user *buf, unsigned long len) \
88875 { \
88876 unsigned long size, written; \
88877 \
88878@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88879 return 0;
88880 }
88881
88882-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88883+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88884
88885 static inline unsigned long
88886 memcpy_skip(void *dst, const void *src, unsigned long n)
88887@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88888 return 0;
88889 }
88890
88891-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88892+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88893
88894 #ifndef arch_perf_out_copy_user
88895 #define arch_perf_out_copy_user arch_perf_out_copy_user
88896@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88897 }
88898 #endif
88899
88900-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88901+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88902
88903 /* Callchain handling */
88904 extern struct perf_callchain_entry *
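The extra DEFINE_OUTPUT_COPY parameter exists purely to carry an address-space qualifier into the generated prototype: the kernel-side copies pass an empty token, while __output_copy_user passes __user, so sparse can now check the buffer argument. The user variant expands to roughly (sketch):

	static inline unsigned long
	__output_copy_user(struct perf_output_handle *handle,
			   const void __user *buf, unsigned long len);
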
88905diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88906index cb346f2..e4dc317 100644
88907--- a/kernel/events/uprobes.c
88908+++ b/kernel/events/uprobes.c
88909@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88910 {
88911 struct page *page;
88912 uprobe_opcode_t opcode;
88913- int result;
88914+ long result;
88915
88916 pagefault_disable();
88917 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
88918diff --git a/kernel/exit.c b/kernel/exit.c
88919index 6806c55..a5fb128 100644
88920--- a/kernel/exit.c
88921+++ b/kernel/exit.c
88922@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
88923 struct task_struct *leader;
88924 int zap_leader;
88925 repeat:
88926+#ifdef CONFIG_NET
88927+ gr_del_task_from_ip_table(p);
88928+#endif
88929+
88930 /* don't need to get the RCU readlock here - the process is dead and
88931 * can't be modifying its own credentials. But shut RCU-lockdep up */
88932 rcu_read_lock();
88933@@ -655,6 +659,8 @@ void do_exit(long code)
88934 int group_dead;
88935 TASKS_RCU(int tasks_rcu_i);
88936
88937+ set_fs(USER_DS);
88938+
88939 profile_task_exit(tsk);
88940
88941 WARN_ON(blk_needs_flush_plug(tsk));
88942@@ -671,7 +677,6 @@ void do_exit(long code)
88943 * mm_release()->clear_child_tid() from writing to a user-controlled
88944 * kernel address.
88945 */
88946- set_fs(USER_DS);
88947
88948 ptrace_event(PTRACE_EVENT_EXIT, code);
88949
88950@@ -729,6 +734,9 @@ void do_exit(long code)
88951 tsk->exit_code = code;
88952 taskstats_exit(tsk, group_dead);
88953
88954+ gr_acl_handle_psacct(tsk, code);
88955+ gr_acl_handle_exit();
88956+
88957 exit_mm(tsk);
88958
88959 if (group_dead)
88960@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
88961 * Take down every thread in the group. This is called by fatal signals
88962 * as well as by sys_exit_group (below).
88963 */
88964-void
88965+__noreturn void
88966 do_group_exit(int exit_code)
88967 {
88968 struct signal_struct *sig = current->signal;
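Hoisting set_fs(USER_DS) to the top of do_exit() closes the window in which a task dies while still under set_fs(KERNEL_DS): any later put_user() on the exit path (the upstream comment above names mm_release()->clear_child_tid()) would then be a raw kernel-memory write to a user-controlled address. The hazard, sketched as a trace (illustrative):

	/*
	 * set_fs(KERNEL_DS);
	 *   ... fatal fault or signal lands here, before the matching set_fs(oldfs) ...
	 * do_exit(code)
	 *   -> mm_release() -> put_user(0, tsk->clear_child_tid)
	 *      with KERNEL_DS still set, that user-controlled address can
	 *      target kernel memory
	 */

The __noreturn on do_group_exit() is a separate, purely informational annotation for the compiler and the size_overflow/latent-entropy plugins.
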
88969diff --git a/kernel/fork.c b/kernel/fork.c
88970index 4dc2dda..651add0 100644
88971--- a/kernel/fork.c
88972+++ b/kernel/fork.c
88973@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
88974 void thread_info_cache_init(void)
88975 {
88976 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
88977- THREAD_SIZE, 0, NULL);
88978+ THREAD_SIZE, SLAB_USERCOPY, NULL);
88979 BUG_ON(thread_info_cache == NULL);
88980 }
88981 # endif
88982 #endif
88983
88984+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88985+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88986+ int node, void **lowmem_stack)
88987+{
88988+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
88989+ void *ret = NULL;
88990+ unsigned int i;
88991+
88992+ *lowmem_stack = alloc_thread_info_node(tsk, node);
88993+ if (*lowmem_stack == NULL)
88994+ goto out;
88995+
88996+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
88997+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
88998+
88999+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89000+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89001+ if (ret == NULL) {
89002+ free_thread_info(*lowmem_stack);
89003+ *lowmem_stack = NULL;
89004+ }
89005+
89006+out:
89007+ return ret;
89008+}
89009+
89010+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89011+{
89012+ unmap_process_stacks(tsk);
89013+}
89014+#else
89015+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89016+ int node, void **lowmem_stack)
89017+{
89018+ return alloc_thread_info_node(tsk, node);
89019+}
89020+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89021+{
89022+ free_thread_info(ti);
89023+}
89024+#endif
89025+
89026 /* SLAB cache for signal_struct structures (tsk->signal) */
89027 static struct kmem_cache *signal_cachep;
89028
89029@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89030 /* SLAB cache for mm_struct structures (tsk->mm) */
89031 static struct kmem_cache *mm_cachep;
89032
89033-static void account_kernel_stack(struct thread_info *ti, int account)
89034+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89035 {
89036+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89037+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89038+#else
89039 struct zone *zone = page_zone(virt_to_page(ti));
89040+#endif
89041
89042 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89043 }
89044
89045 void free_task(struct task_struct *tsk)
89046 {
89047- account_kernel_stack(tsk->stack, -1);
89048+ account_kernel_stack(tsk, tsk->stack, -1);
89049 arch_release_thread_info(tsk->stack);
89050- free_thread_info(tsk->stack);
89051+ gr_free_thread_info(tsk, tsk->stack);
89052 rt_mutex_debug_task_free(tsk);
89053 ftrace_graph_exit_task(tsk);
89054 put_seccomp_filter(tsk);
89055@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89056 {
89057 struct task_struct *tsk;
89058 struct thread_info *ti;
89059+ void *lowmem_stack;
89060 int node = tsk_fork_get_node(orig);
89061 int err;
89062
89063@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89064 if (!tsk)
89065 return NULL;
89066
89067- ti = alloc_thread_info_node(tsk, node);
89068+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89069 if (!ti)
89070 goto free_tsk;
89071
89072@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89073 goto free_ti;
89074
89075 tsk->stack = ti;
89076+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89077+ tsk->lowmem_stack = lowmem_stack;
89078+#endif
89079 #ifdef CONFIG_SECCOMP
89080 /*
89081 * We must handle setting up seccomp filters once we're under
89082@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89083 set_task_stack_end_magic(tsk);
89084
89085 #ifdef CONFIG_CC_STACKPROTECTOR
89086- tsk->stack_canary = get_random_int();
89087+ tsk->stack_canary = pax_get_random_long();
89088 #endif
89089
89090 /*
89091@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89092 tsk->splice_pipe = NULL;
89093 tsk->task_frag.page = NULL;
89094
89095- account_kernel_stack(ti, 1);
89096+ account_kernel_stack(tsk, ti, 1);
89097
89098 return tsk;
89099
89100 free_ti:
89101- free_thread_info(ti);
89102+ gr_free_thread_info(tsk, ti);
89103 free_tsk:
89104 free_task_struct(tsk);
89105 return NULL;
89106 }
89107
89108 #ifdef CONFIG_MMU
89109-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89110+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89111+{
89112+ struct vm_area_struct *tmp;
89113+ unsigned long charge;
89114+ struct file *file;
89115+ int retval;
89116+
89117+ charge = 0;
89118+ if (mpnt->vm_flags & VM_ACCOUNT) {
89119+ unsigned long len = vma_pages(mpnt);
89120+
89121+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89122+ goto fail_nomem;
89123+ charge = len;
89124+ }
89125+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89126+ if (!tmp)
89127+ goto fail_nomem;
89128+ *tmp = *mpnt;
89129+ tmp->vm_mm = mm;
89130+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89131+ retval = vma_dup_policy(mpnt, tmp);
89132+ if (retval)
89133+ goto fail_nomem_policy;
89134+ if (anon_vma_fork(tmp, mpnt))
89135+ goto fail_nomem_anon_vma_fork;
89136+ tmp->vm_flags &= ~VM_LOCKED;
89137+ tmp->vm_next = tmp->vm_prev = NULL;
89138+ tmp->vm_mirror = NULL;
89139+ file = tmp->vm_file;
89140+ if (file) {
89141+ struct inode *inode = file_inode(file);
89142+ struct address_space *mapping = file->f_mapping;
89143+
89144+ get_file(file);
89145+ if (tmp->vm_flags & VM_DENYWRITE)
89146+ atomic_dec(&inode->i_writecount);
89147+ i_mmap_lock_write(mapping);
89148+ if (tmp->vm_flags & VM_SHARED)
89149+ atomic_inc(&mapping->i_mmap_writable);
89150+ flush_dcache_mmap_lock(mapping);
89151+ /* insert tmp into the share list, just after mpnt */
89152+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89153+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89154+ else
89155+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89156+ flush_dcache_mmap_unlock(mapping);
89157+ i_mmap_unlock_write(mapping);
89158+ }
89159+
89160+ /*
89161+ * Clear hugetlb-related page reserves for children. This only
89162+ * affects MAP_PRIVATE mappings. Faults generated by the child
89163+ * are not guaranteed to succeed, even if read-only
89164+ */
89165+ if (is_vm_hugetlb_page(tmp))
89166+ reset_vma_resv_huge_pages(tmp);
89167+
89168+ return tmp;
89169+
89170+fail_nomem_anon_vma_fork:
89171+ mpol_put(vma_policy(tmp));
89172+fail_nomem_policy:
89173+ kmem_cache_free(vm_area_cachep, tmp);
89174+fail_nomem:
89175+ vm_unacct_memory(charge);
89176+ return NULL;
89177+}
89178+
89179+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89180 {
89181 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89182 struct rb_node **rb_link, *rb_parent;
89183 int retval;
89184- unsigned long charge;
89185
89186 uprobe_start_dup_mmap();
89187 down_write(&oldmm->mmap_sem);
89188@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89189
89190 prev = NULL;
89191 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89192- struct file *file;
89193-
89194 if (mpnt->vm_flags & VM_DONTCOPY) {
89195 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89196 -vma_pages(mpnt));
89197 continue;
89198 }
89199- charge = 0;
89200- if (mpnt->vm_flags & VM_ACCOUNT) {
89201- unsigned long len = vma_pages(mpnt);
89202-
89203- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89204- goto fail_nomem;
89205- charge = len;
89206- }
89207- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89208- if (!tmp)
89209- goto fail_nomem;
89210- *tmp = *mpnt;
89211- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89212- retval = vma_dup_policy(mpnt, tmp);
89213- if (retval)
89214- goto fail_nomem_policy;
89215- tmp->vm_mm = mm;
89216- if (anon_vma_fork(tmp, mpnt))
89217- goto fail_nomem_anon_vma_fork;
89218- tmp->vm_flags &= ~VM_LOCKED;
89219- tmp->vm_next = tmp->vm_prev = NULL;
89220- file = tmp->vm_file;
89221- if (file) {
89222- struct inode *inode = file_inode(file);
89223- struct address_space *mapping = file->f_mapping;
89224-
89225- get_file(file);
89226- if (tmp->vm_flags & VM_DENYWRITE)
89227- atomic_dec(&inode->i_writecount);
89228- i_mmap_lock_write(mapping);
89229- if (tmp->vm_flags & VM_SHARED)
89230- atomic_inc(&mapping->i_mmap_writable);
89231- flush_dcache_mmap_lock(mapping);
89232- /* insert tmp into the share list, just after mpnt */
89233- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89234- vma_nonlinear_insert(tmp,
89235- &mapping->i_mmap_nonlinear);
89236- else
89237- vma_interval_tree_insert_after(tmp, mpnt,
89238- &mapping->i_mmap);
89239- flush_dcache_mmap_unlock(mapping);
89240- i_mmap_unlock_write(mapping);
89241+ tmp = dup_vma(mm, oldmm, mpnt);
89242+ if (!tmp) {
89243+ retval = -ENOMEM;
89244+ goto out;
89245 }
89246
89247 /*
89248@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89249 if (retval)
89250 goto out;
89251 }
89252+
89253+#ifdef CONFIG_PAX_SEGMEXEC
89254+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89255+ struct vm_area_struct *mpnt_m;
89256+
89257+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89258+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89259+
89260+ if (!mpnt->vm_mirror)
89261+ continue;
89262+
89263+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89264+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89265+ mpnt->vm_mirror = mpnt_m;
89266+ } else {
89267+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89268+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89269+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89270+ mpnt->vm_mirror->vm_mirror = mpnt;
89271+ }
89272+ }
89273+ BUG_ON(mpnt_m);
89274+ }
89275+#endif
89276+
89277 /* a new mm has just been created */
89278 arch_dup_mmap(oldmm, mm);
89279 retval = 0;
89280@@ -486,14 +589,6 @@ out:
89281 up_write(&oldmm->mmap_sem);
89282 uprobe_end_dup_mmap();
89283 return retval;
89284-fail_nomem_anon_vma_fork:
89285- mpol_put(vma_policy(tmp));
89286-fail_nomem_policy:
89287- kmem_cache_free(vm_area_cachep, tmp);
89288-fail_nomem:
89289- retval = -ENOMEM;
89290- vm_unacct_memory(charge);
89291- goto out;
89292 }
89293
89294 static inline int mm_alloc_pgd(struct mm_struct *mm)
89295@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89296 return ERR_PTR(err);
89297
89298 mm = get_task_mm(task);
89299- if (mm && mm != current->mm &&
89300- !ptrace_may_access(task, mode)) {
89301+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89302+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89303 mmput(mm);
89304 mm = ERR_PTR(-EACCES);
89305 }
89306@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89307 spin_unlock(&fs->lock);
89308 return -EAGAIN;
89309 }
89310- fs->users++;
89311+ atomic_inc(&fs->users);
89312 spin_unlock(&fs->lock);
89313 return 0;
89314 }
89315 tsk->fs = copy_fs_struct(fs);
89316 if (!tsk->fs)
89317 return -ENOMEM;
89318+ /* Carry through gr_chroot_dentry and is_chrooted instead
89319+ of recomputing it here. Already copied when the task struct
89320+ is duplicated. This allows pivot_root to not be treated as
89321+ a chroot
89322+ */
89323+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89324+
89325 return 0;
89326 }
89327
89328@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89329 * parts of the process environment (as per the clone
89330 * flags). The actual kick-off is left to the caller.
89331 */
89332-static struct task_struct *copy_process(unsigned long clone_flags,
89333+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89334 unsigned long stack_start,
89335 unsigned long stack_size,
89336 int __user *child_tidptr,
89337@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89338 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89339 #endif
89340 retval = -EAGAIN;
89341+
89342+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89343+
89344 if (atomic_read(&p->real_cred->user->processes) >=
89345 task_rlimit(p, RLIMIT_NPROC)) {
89346 if (p->real_cred->user != INIT_USER &&
89347@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89348 goto bad_fork_free_pid;
89349 }
89350
89351+ /* synchronizes with gr_set_acls()
89352+ we need to call this past the point of no return for fork()
89353+ */
89354+ gr_copy_label(p);
89355+
89356 if (likely(p->pid)) {
89357 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89358
89359@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89360 bad_fork_free:
89361 free_task(p);
89362 fork_out:
89363+ gr_log_forkfail(retval);
89364+
89365 return ERR_PTR(retval);
89366 }
89367
89368@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89369
89370 p = copy_process(clone_flags, stack_start, stack_size,
89371 child_tidptr, NULL, trace);
89372+ add_latent_entropy();
89373 /*
89374 * Do this prior waking up the new thread - the thread pointer
89375 * might get invalid after that point, if the thread exits quickly.
89376@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89377 if (clone_flags & CLONE_PARENT_SETTID)
89378 put_user(nr, parent_tidptr);
89379
89380+ gr_handle_brute_check();
89381+
89382 if (clone_flags & CLONE_VFORK) {
89383 p->vfork_done = &vfork;
89384 init_completion(&vfork);
89385@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89386 mm_cachep = kmem_cache_create("mm_struct",
89387 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89388 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89389- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89390+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89391 mmap_init();
89392 nsproxy_cache_init();
89393 }
89394@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89395 return 0;
89396
89397 /* don't need lock here; in the worst case we'll do useless copy */
89398- if (fs->users == 1)
89399+ if (atomic_read(&fs->users) == 1)
89400 return 0;
89401
89402 *new_fsp = copy_fs_struct(fs);
89403@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89404 fs = current->fs;
89405 spin_lock(&fs->lock);
89406 current->fs = new_fs;
89407- if (--fs->users)
89408+ gr_set_chroot_entries(current, &current->fs->root);
89409+ if (atomic_dec_return(&fs->users))
89410 new_fs = NULL;
89411 else
89412 new_fs = fs;
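The largest piece here is GRKERNSEC_KSTACKOVERFLOW: thread stacks are still allocated from lowmem (kept in tsk->lowmem_stack for accounting and for code that needs a linear-mapping address), but the stack actually used is a vmap() alias of those pages; VM_IOREMAP is only borrowed for its THREAD_SIZE alignment, as the in-line comment says. The payoff is that an overflowing stack runs into an unmapped vmalloc guard page and faults immediately instead of silently corrupting an adjacent allocation. Layout sketch (illustrative):

	/*
	 *   [ guard page ][ THREAD_SIZE stack (vmap'd) ][ guard page ]
	 *                  ^ tsk->stack / thread_info
	 *
	 *   tsk->lowmem_stack = linear-mapping alias of the same pages,
	 *   torn down together with the mapping via unmap_process_stacks()
	 */

The dup_mmap() change is a pure refactor by comparison: the per-VMA body moves into dup_vma() so the PAX_SEGMEXEC mirror fix-up loop can run over the freshly duplicated VMA list.
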
89413diff --git a/kernel/futex.c b/kernel/futex.c
89414index 63678b5..512f9af 100644
89415--- a/kernel/futex.c
89416+++ b/kernel/futex.c
89417@@ -201,7 +201,7 @@ struct futex_pi_state {
89418 atomic_t refcount;
89419
89420 union futex_key key;
89421-};
89422+} __randomize_layout;
89423
89424 /**
89425 * struct futex_q - The hashed futex queue entry, one per waiting task
89426@@ -235,7 +235,7 @@ struct futex_q {
89427 struct rt_mutex_waiter *rt_waiter;
89428 union futex_key *requeue_pi_key;
89429 u32 bitset;
89430-};
89431+} __randomize_layout;
89432
89433 static const struct futex_q futex_q_init = {
89434 /* list gets initialized in queue_me()*/
89435@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89436 struct page *page, *page_head;
89437 int err, ro = 0;
89438
89439+#ifdef CONFIG_PAX_SEGMEXEC
89440+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89441+ return -EFAULT;
89442+#endif
89443+
89444 /*
89445 * The futex address must be "naturally" aligned.
89446 */
89447@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89448
89449 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89450 {
89451- int ret;
89452+ unsigned long ret;
89453
89454 pagefault_disable();
89455 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89456@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89457 {
89458 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89459 u32 curval;
89460+ mm_segment_t oldfs;
89461
89462 /*
89463 * This will fail and we want it. Some arch implementations do
89464@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89465 * implementation, the non-functional ones will return
89466 * -ENOSYS.
89467 */
89468+ oldfs = get_fs();
89469+ set_fs(USER_DS);
89470 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89471 futex_cmpxchg_enabled = 1;
89472+ set_fs(oldfs);
89473 #endif
89474 }
89475
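The futex_detect_cmpxchg() change is subtle: the boot-time probe wants a fault from cmpxchg on a NULL user pointer and treats -EFAULT as "the arch implements it". Pinning the probe to USER_DS ensures the NULL dereference is handled as a genuine userland access (and therefore faults as intended) under PaX's user/kernel separation, rather than being evaluated against whatever address limit boot happened to be running with. The earlier hunks in the file add a SEGMEXEC range check in get_futex_key() and widen get_futex_value_locked()'s ret to unsigned long to match __copy_from_user_inatomic(). Shape of the fixed probe (this restates the hunk):

	oldfs = get_fs();
	set_fs(USER_DS);	/* make the NULL access a real user access */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
	set_fs(oldfs);
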
89476diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89477index 55c8c93..9ba7ad6 100644
89478--- a/kernel/futex_compat.c
89479+++ b/kernel/futex_compat.c
89480@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89481 return 0;
89482 }
89483
89484-static void __user *futex_uaddr(struct robust_list __user *entry,
89485+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89486 compat_long_t futex_offset)
89487 {
89488 compat_uptr_t base = ptr_to_compat(entry);
89489diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89490index b358a80..fc25240 100644
89491--- a/kernel/gcov/base.c
89492+++ b/kernel/gcov/base.c
89493@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89494 }
89495
89496 #ifdef CONFIG_MODULES
89497-static inline int within(void *addr, void *start, unsigned long size)
89498-{
89499- return ((addr >= start) && (addr < start + size));
89500-}
89501-
89502 /* Update list and generate events when modules are unloaded. */
89503 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89504 void *data)
89505@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89506
89507 /* Remove entries located in module from linked list. */
89508 while ((info = gcov_info_next(info))) {
89509- if (within(info, mod->module_core, mod->core_size)) {
89510+ if (within_module_core_rw((unsigned long)info, mod)) {
89511 gcov_info_unlink(prev, info);
89512 if (gcov_events_enabled)
89513 gcov_event(GCOV_REMOVE, info);
89514diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89515index 8069237..fe712d0 100644
89516--- a/kernel/irq/manage.c
89517+++ b/kernel/irq/manage.c
89518@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89519
89520 action_ret = handler_fn(desc, action);
89521 if (action_ret == IRQ_HANDLED)
89522- atomic_inc(&desc->threads_handled);
89523+ atomic_inc_unchecked(&desc->threads_handled);
89524
89525 wake_threads_waitq(desc);
89526 }
89527diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89528index e2514b0..de3dfe0 100644
89529--- a/kernel/irq/spurious.c
89530+++ b/kernel/irq/spurious.c
89531@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89532 * count. We just care about the count being
89533 * different than the one we saw before.
89534 */
89535- handled = atomic_read(&desc->threads_handled);
89536+ handled = atomic_read_unchecked(&desc->threads_handled);
89537 handled |= SPURIOUS_DEFERRED;
89538 if (handled != desc->threads_handled_last) {
89539 action_ret = IRQ_HANDLED;
89540diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89541index 9019f15..9a3c42e 100644
89542--- a/kernel/jump_label.c
89543+++ b/kernel/jump_label.c
89544@@ -14,6 +14,7 @@
89545 #include <linux/err.h>
89546 #include <linux/static_key.h>
89547 #include <linux/jump_label_ratelimit.h>
89548+#include <linux/mm.h>
89549
89550 #ifdef HAVE_JUMP_LABEL
89551
89552@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89553
89554 size = (((unsigned long)stop - (unsigned long)start)
89555 / sizeof(struct jump_entry));
89556+ pax_open_kernel();
89557 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89558+ pax_close_kernel();
89559 }
89560
89561 static void jump_label_update(struct static_key *key, int enable);
89562@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89563 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89564 struct jump_entry *iter;
89565
89566+ pax_open_kernel();
89567 for (iter = iter_start; iter < iter_stop; iter++) {
89568 if (within_module_init(iter->code, mod))
89569 iter->code = 0;
89570 }
89571+ pax_close_kernel();
89572 }
89573
89574 static int
89575diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89576index 5c5987f..bc502b0 100644
89577--- a/kernel/kallsyms.c
89578+++ b/kernel/kallsyms.c
89579@@ -11,6 +11,9 @@
89580 * Changed the compression method from stem compression to "table lookup"
89581 * compression (see scripts/kallsyms.c for a more complete description)
89582 */
89583+#ifdef CONFIG_GRKERNSEC_HIDESYM
89584+#define __INCLUDED_BY_HIDESYM 1
89585+#endif
89586 #include <linux/kallsyms.h>
89587 #include <linux/module.h>
89588 #include <linux/init.h>
89589@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89590
89591 static inline int is_kernel_inittext(unsigned long addr)
89592 {
89593+ if (system_state != SYSTEM_BOOTING)
89594+ return 0;
89595+
89596 if (addr >= (unsigned long)_sinittext
89597 && addr <= (unsigned long)_einittext)
89598 return 1;
89599 return 0;
89600 }
89601
89602+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89603+#ifdef CONFIG_MODULES
89604+static inline int is_module_text(unsigned long addr)
89605+{
89606+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89607+ return 1;
89608+
89609+ addr = ktla_ktva(addr);
89610+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89611+}
89612+#else
89613+static inline int is_module_text(unsigned long addr)
89614+{
89615+ return 0;
89616+}
89617+#endif
89618+#endif
89619+
89620 static inline int is_kernel_text(unsigned long addr)
89621 {
89622 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89623@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89624
89625 static inline int is_kernel(unsigned long addr)
89626 {
89627+
89628+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89629+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89630+ return 1;
89631+
89632+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89633+#else
89634 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89635+#endif
89636+
89637 return 1;
89638 return in_gate_area_no_mm(addr);
89639 }
89640
89641 static int is_ksym_addr(unsigned long addr)
89642 {
89643+
89644+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89645+ if (is_module_text(addr))
89646+ return 0;
89647+#endif
89648+
89649 if (all_var)
89650 return is_kernel(addr);
89651
89652@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89653
89654 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89655 {
89656- iter->name[0] = '\0';
89657 iter->nameoff = get_symbol_offset(new_pos);
89658 iter->pos = new_pos;
89659 }
89660@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89661 {
89662 struct kallsym_iter *iter = m->private;
89663
89664+#ifdef CONFIG_GRKERNSEC_HIDESYM
89665+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89666+ return 0;
89667+#endif
89668+
89669 /* Some debugging symbols have no name. Ignore them. */
89670 if (!iter->name[0])
89671 return 0;
89672@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89673 */
89674 type = iter->exported ? toupper(iter->type) :
89675 tolower(iter->type);
89676+
89677 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89678 type, iter->name, iter->module_name);
89679 } else
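The kallsyms changes implement GRKERNSEC_HIDESYM's main promise: s_show() emits nothing for non-root readers, so /proc/kallsyms reads back empty for them, and values are printed with %pK for everyone else; is_kernel_inittext() additionally stops matching once boot finishes, and under KERNEXEC module-range addresses are excluded from is_ksym_addr(). The reader-facing gate, restated (sketch):

	/* GRKERNSEC_HIDESYM gate at the top of s_show() */
	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
		return 0;	/* record swallowed: the file appears empty */
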
89680diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89681index 0aa69ea..a7fcafb 100644
89682--- a/kernel/kcmp.c
89683+++ b/kernel/kcmp.c
89684@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89685 struct task_struct *task1, *task2;
89686 int ret;
89687
89688+#ifdef CONFIG_GRKERNSEC
89689+ return -ENOSYS;
89690+#endif
89691+
89692 rcu_read_lock();
89693
89694 /*
89695diff --git a/kernel/kexec.c b/kernel/kexec.c
89696index 9a8a01a..3c35dd6 100644
89697--- a/kernel/kexec.c
89698+++ b/kernel/kexec.c
89699@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89700 compat_ulong_t, flags)
89701 {
89702 struct compat_kexec_segment in;
89703- struct kexec_segment out, __user *ksegments;
89704+ struct kexec_segment out;
89705+ struct kexec_segment __user *ksegments;
89706 unsigned long i, result;
89707
89708 /* Don't allow clients that don't understand the native
89709diff --git a/kernel/kmod.c b/kernel/kmod.c
89710index 2777f40..a26e825 100644
89711--- a/kernel/kmod.c
89712+++ b/kernel/kmod.c
89713@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89714 kfree(info->argv);
89715 }
89716
89717-static int call_modprobe(char *module_name, int wait)
89718+static int call_modprobe(char *module_name, char *module_param, int wait)
89719 {
89720 struct subprocess_info *info;
89721 static char *envp[] = {
89722@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89723 NULL
89724 };
89725
89726- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89727+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89728 if (!argv)
89729 goto out;
89730
89731@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89732 argv[1] = "-q";
89733 argv[2] = "--";
89734 argv[3] = module_name; /* check free_modprobe_argv() */
89735- argv[4] = NULL;
89736+ argv[4] = module_param;
89737+ argv[5] = NULL;
89738
89739 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89740 NULL, free_modprobe_argv, NULL);
89741@@ -122,9 +123,8 @@ out:
89742 * If module auto-loading support is disabled then this function
89743 * becomes a no-operation.
89744 */
89745-int __request_module(bool wait, const char *fmt, ...)
89746+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89747 {
89748- va_list args;
89749 char module_name[MODULE_NAME_LEN];
89750 unsigned int max_modprobes;
89751 int ret;
89752@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89753 if (!modprobe_path[0])
89754 return 0;
89755
89756- va_start(args, fmt);
89757- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89758- va_end(args);
89759+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89760 if (ret >= MODULE_NAME_LEN)
89761 return -ENAMETOOLONG;
89762
89763@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89764 if (ret)
89765 return ret;
89766
89767+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89768+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89769+ /* hack to workaround consolekit/udisks stupidity */
89770+ read_lock(&tasklist_lock);
89771+ if (!strcmp(current->comm, "mount") &&
89772+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89773+ read_unlock(&tasklist_lock);
89774+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89775+ return -EPERM;
89776+ }
89777+ read_unlock(&tasklist_lock);
89778+ }
89779+#endif
89780+
89781 /* If modprobe needs a service that is in a module, we get a recursive
89782 * loop. Limit the number of running kmod threads to max_threads/2 or
89783 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89784@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89785
89786 trace_module_request(module_name, wait, _RET_IP_);
89787
89788- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89789+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89790
89791 atomic_dec(&kmod_concurrent);
89792 return ret;
89793 }
89794+
89795+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89796+{
89797+ va_list args;
89798+ int ret;
89799+
89800+ va_start(args, fmt);
89801+ ret = ____request_module(wait, module_param, fmt, args);
89802+ va_end(args);
89803+
89804+ return ret;
89805+}
89806+
89807+int __request_module(bool wait, const char *fmt, ...)
89808+{
89809+ va_list args;
89810+ int ret;
89811+
89812+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89813+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89814+ char module_param[MODULE_NAME_LEN];
89815+
89816+ memset(module_param, 0, sizeof(module_param));
89817+
89818+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89819+
89820+ va_start(args, fmt);
89821+ ret = ____request_module(wait, module_param, fmt, args);
89822+ va_end(args);
89823+
89824+ return ret;
89825+ }
89826+#endif
89827+
89828+ va_start(args, fmt);
89829+ ret = ____request_module(wait, NULL, fmt, args);
89830+ va_end(args);
89831+
89832+ return ret;
89833+}
89834+
89835 EXPORT_SYMBOL(__request_module);
89836 #endif /* CONFIG_MODULES */
89837
89838 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89839 {
89840+#ifdef CONFIG_GRKERNSEC
89841+ kfree(info->path);
89842+ info->path = info->origpath;
89843+#endif
89844 if (info->cleanup)
89845 (*info->cleanup)(info);
89846 kfree(info);
89847@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
89848 */
89849 set_user_nice(current, 0);
89850
89851+#ifdef CONFIG_GRKERNSEC
89852+ /* this is race-free as far as userland is concerned as we copied
89853+ out the path to be used prior to this point and are now operating
89854+ on that copy
89855+ */
89856+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89857+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89858+ strncmp(sub_info->path, "/usr/libexec/", 13) &&
89859+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89860+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
89861+ retval = -EPERM;
89862+ goto out;
89863+ }
89864+#endif
89865+
89866 retval = -ENOMEM;
89867 new = prepare_kernel_cred(current);
89868 if (!new)
89869@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
89870 commit_creds(new);
89871
89872 retval = do_execve(getname_kernel(sub_info->path),
89873- (const char __user *const __user *)sub_info->argv,
89874- (const char __user *const __user *)sub_info->envp);
89875+ (const char __user *const __force_user *)sub_info->argv,
89876+ (const char __user *const __force_user *)sub_info->envp);
89877 out:
89878 sub_info->retval = retval;
89879 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89880@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
89881 *
89882 * Thus the __user pointer cast is valid here.
89883 */
89884- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89885+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89886
89887 /*
89888 * If ret is 0, either ____call_usermodehelper failed and the
89889@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89890 goto out;
89891
89892 INIT_WORK(&sub_info->work, __call_usermodehelper);
89893+#ifdef CONFIG_GRKERNSEC
89894+ sub_info->origpath = path;
89895+ sub_info->path = kstrdup(path, gfp_mask);
89896+#else
89897 sub_info->path = path;
89898+#endif
89899 sub_info->argv = argv;
89900 sub_info->envp = envp;
89901
89902@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89903 static int proc_cap_handler(struct ctl_table *table, int write,
89904 void __user *buffer, size_t *lenp, loff_t *ppos)
89905 {
89906- struct ctl_table t;
89907+ ctl_table_no_const t;
89908 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89909 kernel_cap_t new_cap;
89910 int err, i;
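
The MODHARDEN hunks above work by threading a marker argument into the modprobe invocation: ____request_module() appends "grsec_modharden_normal<uid>_" (or "grsec_modharden_fs" on the mount path), and the module loader later parses it back out of mod->args to decide whether to refuse the load. The sketch below is a minimal userspace rendering of just that tag-and-parse handshake; MODULE_NAME_LEN and both helper names are illustrative, not kernel API.

    #include <stdio.h>
    #include <string.h>

    #define MODULE_NAME_LEN 64  /* assumption: mirrors the kernel constant */

    /* requester side: encode the caller's uid into an extra argument */
    static void tag_request(char *buf, size_t len, unsigned int uid)
    {
            snprintf(buf, len - 1, "grsec_modharden_normal%u_", uid);
    }

    /* loader side: recover the uid with strstr(), as load_module() does */
    static int find_tagged_uid(const char *args, unsigned int *uid)
    {
            const char *p = strstr(args, "grsec_modharden_normal");

            if (!p)
                    return -1;
            p += sizeof("grsec_modharden_normal") - 1;
            return sscanf(p, "%u_", uid) == 1 ? 0 : -1;
    }

    int main(void)
    {
            char param[MODULE_NAME_LEN] = "";
            unsigned int uid;

            tag_request(param, sizeof(param), 1000);
            if (!find_tagged_uid(param, &uid))
                    printf("auto-load request tagged by uid %u\n", uid);
            return 0;
    }
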
89911diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89912index ee61992..62142b1 100644
89913--- a/kernel/kprobes.c
89914+++ b/kernel/kprobes.c
89915@@ -31,6 +31,9 @@
89916 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
89917 * <prasanna@in.ibm.com> added function-return probes.
89918 */
89919+#ifdef CONFIG_GRKERNSEC_HIDESYM
89920+#define __INCLUDED_BY_HIDESYM 1
89921+#endif
89922 #include <linux/kprobes.h>
89923 #include <linux/hash.h>
89924 #include <linux/init.h>
89925@@ -122,12 +125,12 @@ enum kprobe_slot_state {
89926
89927 static void *alloc_insn_page(void)
89928 {
89929- return module_alloc(PAGE_SIZE);
89930+ return module_alloc_exec(PAGE_SIZE);
89931 }
89932
89933 static void free_insn_page(void *page)
89934 {
89935- module_memfree(page);
89936+ module_memfree_exec(page);
89937 }
89938
89939 struct kprobe_insn_cache kprobe_insn_slots = {
89940@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
89941 kprobe_type = "k";
89942
89943 if (sym)
89944- seq_printf(pi, "%p %s %s+0x%x %s ",
89945+ seq_printf(pi, "%pK %s %s+0x%x %s ",
89946 p->addr, kprobe_type, sym, offset,
89947 (modname ? modname : " "));
89948 else
89949- seq_printf(pi, "%p %s %p ",
89950+ seq_printf(pi, "%pK %s %pK ",
89951 p->addr, kprobe_type, p->addr);
89952
89953 if (!pp)
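
The kprobes.c change above is purely about the format specifier: %pK honours /proc/sys/kernel/kptr_restrict and typically prints a zeroed value to unprivileged readers, so the kprobe listing stops handing out kernel text addresses. A fragment sketching the contrast, assuming a kernel-module context with seq_file available; demo_show is an illustrative name:

    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
            /* raw address: leaks kernel layout to any reader of the file */
            seq_printf(m, "leaky:   %p\n",  demo_show);
            /* censored for unprivileged readers, per kptr_restrict */
            seq_printf(m, "guarded: %pK\n", demo_show);
            return 0;
    }
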
89954diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
89955index 6683cce..daf8999 100644
89956--- a/kernel/ksysfs.c
89957+++ b/kernel/ksysfs.c
89958@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
89959 {
89960 if (count+1 > UEVENT_HELPER_PATH_LEN)
89961 return -ENOENT;
89962+ if (!capable(CAP_SYS_ADMIN))
89963+ return -EPERM;
89964 memcpy(uevent_helper, buf, count);
89965 uevent_helper[count] = '\0';
89966 if (count && uevent_helper[count-1] == '\n')
89967@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
89968 return count;
89969 }
89970
89971-static struct bin_attribute notes_attr = {
89972+static bin_attribute_no_const notes_attr __read_only = {
89973 .attr = {
89974 .name = "notes",
89975 .mode = S_IRUGO,
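
The uevent_helper hunk above re-checks credentials at write time because file mode alone is not enough: a descriptor opened by a privileged process can be inherited or passed to a less-privileged writer. The same guard pattern in isolation, as a sketch with an illustrative attribute name:

    static ssize_t guarded_store(struct kobject *kobj, struct kobj_attribute *attr,
                                 const char *buf, size_t count)
    {
            /* check the writer's capability on every store, not just at open */
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;
            /* ... accept and apply the new value here ... */
            return count;
    }
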
89976diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
89977index 88d0d44..e9ce0ee 100644
89978--- a/kernel/locking/lockdep.c
89979+++ b/kernel/locking/lockdep.c
89980@@ -599,6 +599,10 @@ static int static_obj(void *obj)
89981 end = (unsigned long) &_end,
89982 addr = (unsigned long) obj;
89983
89984+#ifdef CONFIG_PAX_KERNEXEC
89985+ start = ktla_ktva(start);
89986+#endif
89987+
89988 /*
89989 * static variable?
89990 */
89991@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
89992 if (!static_obj(lock->key)) {
89993 debug_locks_off();
89994 printk("INFO: trying to register non-static key.\n");
89995+ printk("lock:%pS key:%pS.\n", lock, lock->key);
89996 printk("the code is fine but needs lockdep annotation.\n");
89997 printk("turning off the locking correctness validator.\n");
89998 dump_stack();
89999@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90000 if (!class)
90001 return 0;
90002 }
90003- atomic_inc((atomic_t *)&class->ops);
90004+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90005 if (very_verbose(class)) {
90006 printk("\nacquire class [%p] %s", class->key, class->name);
90007 if (class->name_version > 1)
90008diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90009index ef43ac4..2720dfa 100644
90010--- a/kernel/locking/lockdep_proc.c
90011+++ b/kernel/locking/lockdep_proc.c
90012@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90013 return 0;
90014 }
90015
90016- seq_printf(m, "%p", class->key);
90017+ seq_printf(m, "%pK", class->key);
90018 #ifdef CONFIG_DEBUG_LOCKDEP
90019 seq_printf(m, " OPS:%8ld", class->ops);
90020 #endif
90021@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90022
90023 list_for_each_entry(entry, &class->locks_after, entry) {
90024 if (entry->distance == 1) {
90025- seq_printf(m, " -> [%p] ", entry->class->key);
90026+ seq_printf(m, " -> [%pK] ", entry->class->key);
90027 print_name(m, entry->class);
90028 seq_puts(m, "\n");
90029 }
90030@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90031 if (!class->key)
90032 continue;
90033
90034- seq_printf(m, "[%p] ", class->key);
90035+ seq_printf(m, "[%pK] ", class->key);
90036 print_name(m, class);
90037 seq_puts(m, "\n");
90038 }
90039@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90040 if (!i)
90041 seq_line(m, '-', 40-namelen, namelen);
90042
90043- snprintf(ip, sizeof(ip), "[<%p>]",
90044+ snprintf(ip, sizeof(ip), "[<%pK>]",
90045 (void *)class->contention_point[i]);
90046 seq_printf(m, "%40s %14lu %29s %pS\n",
90047 name, stats->contention_point[i],
90048@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90049 if (!i)
90050 seq_line(m, '-', 40-namelen, namelen);
90051
90052- snprintf(ip, sizeof(ip), "[<%p>]",
90053+ snprintf(ip, sizeof(ip), "[<%pK>]",
90054 (void *)class->contending_point[i]);
90055 seq_printf(m, "%40s %14lu %29s %pS\n",
90056 name, stats->contending_point[i],
90057diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
90058index 9887a90..0cd2b1d 100644
90059--- a/kernel/locking/mcs_spinlock.c
90060+++ b/kernel/locking/mcs_spinlock.c
90061@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90062
90063 prev = decode_cpu(old);
90064 node->prev = prev;
90065- ACCESS_ONCE(prev->next) = node;
90066+ ACCESS_ONCE_RW(prev->next) = node;
90067
90068 /*
90069 * Normally @prev is untouchable after the above store; because at that
90070@@ -172,8 +172,8 @@ unqueue:
90071 * it will wait in Step-A.
90072 */
90073
90074- ACCESS_ONCE(next->prev) = prev;
90075- ACCESS_ONCE(prev->next) = next;
90076+ ACCESS_ONCE_RW(next->prev) = prev;
90077+ ACCESS_ONCE_RW(prev->next) = next;
90078
90079 return false;
90080 }
90081@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90082 node = this_cpu_ptr(&osq_node);
90083 next = xchg(&node->next, NULL);
90084 if (next) {
90085- ACCESS_ONCE(next->locked) = 1;
90086+ ACCESS_ONCE_RW(next->locked) = 1;
90087 return;
90088 }
90089
90090 next = osq_wait_next(lock, node, NULL);
90091 if (next)
90092- ACCESS_ONCE(next->locked) = 1;
90093+ ACCESS_ONCE_RW(next->locked) = 1;
90094 }
90095
90096 #endif
90097diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90098index 4d60986..5d351c1 100644
90099--- a/kernel/locking/mcs_spinlock.h
90100+++ b/kernel/locking/mcs_spinlock.h
90101@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90102 */
90103 return;
90104 }
90105- ACCESS_ONCE(prev->next) = node;
90106+ ACCESS_ONCE_RW(prev->next) = node;
90107
90108 /* Wait until the lock holder passes the lock down. */
90109 arch_mcs_spin_lock_contended(&node->locked);
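
Every ACCESS_ONCE to ACCESS_ONCE_RW conversion in the two MCS files follows from one PaX change: the plain macro gains a const qualifier so an accidental write through it becomes a compile error, and deliberate writes must be spelled with the _RW form. A simplified sketch of the pair (the real definitions live in PaX's compiler.h changes, so treat these as approximations):

    /* reads: const-qualified, so a write through this form will not compile */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    /* writes: explicitly marked, which is what the hunks above convert to */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

Under that split, ACCESS_ONCE_RW(prev->next) = node documents the store, while plain ACCESS_ONCE remains correct for every load.
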
90110diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90111index 3ef3736..9c951fa 100644
90112--- a/kernel/locking/mutex-debug.c
90113+++ b/kernel/locking/mutex-debug.c
90114@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90115 }
90116
90117 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90118- struct thread_info *ti)
90119+ struct task_struct *task)
90120 {
90121 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90122
90123 /* Mark the current thread as blocked on the lock: */
90124- ti->task->blocked_on = waiter;
90125+ task->blocked_on = waiter;
90126 }
90127
90128 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90129- struct thread_info *ti)
90130+ struct task_struct *task)
90131 {
90132 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90133- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90134- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90135- ti->task->blocked_on = NULL;
90136+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90137+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90138+ task->blocked_on = NULL;
90139
90140 list_del_init(&waiter->list);
90141 waiter->task = NULL;
90142diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90143index 0799fd3..d06ae3b 100644
90144--- a/kernel/locking/mutex-debug.h
90145+++ b/kernel/locking/mutex-debug.h
90146@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90147 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90148 extern void debug_mutex_add_waiter(struct mutex *lock,
90149 struct mutex_waiter *waiter,
90150- struct thread_info *ti);
90151+ struct task_struct *task);
90152 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90153- struct thread_info *ti);
90154+ struct task_struct *task);
90155 extern void debug_mutex_unlock(struct mutex *lock);
90156 extern void debug_mutex_init(struct mutex *lock, const char *name,
90157 struct lock_class_key *key);
90158diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90159index 4541951..39fe90a 100644
90160--- a/kernel/locking/mutex.c
90161+++ b/kernel/locking/mutex.c
90162@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90163 goto skip_wait;
90164
90165 debug_mutex_lock_common(lock, &waiter);
90166- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90167+ debug_mutex_add_waiter(lock, &waiter, task);
90168
90169 /* add waiting tasks to the end of the waitqueue (FIFO): */
90170 list_add_tail(&waiter.list, &lock->wait_list);
90171@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90172 schedule_preempt_disabled();
90173 spin_lock_mutex(&lock->wait_lock, flags);
90174 }
90175- mutex_remove_waiter(lock, &waiter, current_thread_info());
90176+ mutex_remove_waiter(lock, &waiter, task);
90177 /* set it to 0 if there are no waiters left: */
90178 if (likely(list_empty(&lock->wait_list)))
90179 atomic_set(&lock->count, 0);
90180@@ -606,7 +606,7 @@ skip_wait:
90181 return 0;
90182
90183 err:
90184- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90185+ mutex_remove_waiter(lock, &waiter, task);
90186 spin_unlock_mutex(&lock->wait_lock, flags);
90187 debug_mutex_free_waiter(&waiter);
90188 mutex_release(&lock->dep_map, 1, ip);
90189diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90190index 1d96dd0..994ff19 100644
90191--- a/kernel/locking/rtmutex-tester.c
90192+++ b/kernel/locking/rtmutex-tester.c
90193@@ -22,7 +22,7 @@
90194 #define MAX_RT_TEST_MUTEXES 8
90195
90196 static spinlock_t rttest_lock;
90197-static atomic_t rttest_event;
90198+static atomic_unchecked_t rttest_event;
90199
90200 struct test_thread_data {
90201 int opcode;
90202@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90203
90204 case RTTEST_LOCKCONT:
90205 td->mutexes[td->opdata] = 1;
90206- td->event = atomic_add_return(1, &rttest_event);
90207+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90208 return 0;
90209
90210 case RTTEST_RESET:
90211@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90212 return 0;
90213
90214 case RTTEST_RESETEVENT:
90215- atomic_set(&rttest_event, 0);
90216+ atomic_set_unchecked(&rttest_event, 0);
90217 return 0;
90218
90219 default:
90220@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90221 return ret;
90222
90223 td->mutexes[id] = 1;
90224- td->event = atomic_add_return(1, &rttest_event);
90225+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90226 rt_mutex_lock(&mutexes[id]);
90227- td->event = atomic_add_return(1, &rttest_event);
90228+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90229 td->mutexes[id] = 4;
90230 return 0;
90231
90232@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90233 return ret;
90234
90235 td->mutexes[id] = 1;
90236- td->event = atomic_add_return(1, &rttest_event);
90237+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90238 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90239- td->event = atomic_add_return(1, &rttest_event);
90240+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90241 td->mutexes[id] = ret ? 0 : 4;
90242 return ret ? -EINTR : 0;
90243
90244@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90245 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90246 return ret;
90247
90248- td->event = atomic_add_return(1, &rttest_event);
90249+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90250 rt_mutex_unlock(&mutexes[id]);
90251- td->event = atomic_add_return(1, &rttest_event);
90252+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90253 td->mutexes[id] = 0;
90254 return 0;
90255
90256@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90257 break;
90258
90259 td->mutexes[dat] = 2;
90260- td->event = atomic_add_return(1, &rttest_event);
90261+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90262 break;
90263
90264 default:
90265@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90266 return;
90267
90268 td->mutexes[dat] = 3;
90269- td->event = atomic_add_return(1, &rttest_event);
90270+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90271 break;
90272
90273 case RTTEST_LOCKNOWAIT:
90274@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90275 return;
90276
90277 td->mutexes[dat] = 1;
90278- td->event = atomic_add_return(1, &rttest_event);
90279+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90280 return;
90281
90282 default:
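
The rttest_event conversions above are the standard PAX_REFCOUNT pattern: under that feature, ordinary atomic_t arithmetic traps on overflow to stop reference-count wraps, so counters where wraparound is harmless, like this pure event counter, move to the *_unchecked API that keeps the historical wrapping behaviour. A conceptual stand-in, using a GCC builtin in place of the real per-architecture assembly:

    typedef struct { int counter; } atomic_unchecked_t;

    /* wrapping add-and-return: no overflow trap, unlike the checked
     * atomic_add_return() under PAX_REFCOUNT */
    static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
            return __sync_add_and_fetch(&v->counter, i);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
            v->counter = i;
    }
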
90283diff --git a/kernel/module.c b/kernel/module.c
90284index d856e96..b82225c 100644
90285--- a/kernel/module.c
90286+++ b/kernel/module.c
90287@@ -59,6 +59,7 @@
90288 #include <linux/jump_label.h>
90289 #include <linux/pfn.h>
90290 #include <linux/bsearch.h>
90291+#include <linux/grsecurity.h>
90292 #include <uapi/linux/module.h>
90293 #include "module-internal.h"
90294
90295@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90296
90297 /* Bounds of module allocation, for speeding __module_address.
90298 * Protected by module_mutex. */
90299-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90300+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90301+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90302
90303 int register_module_notifier(struct notifier_block *nb)
90304 {
90305@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90306 return true;
90307
90308 list_for_each_entry_rcu(mod, &modules, list) {
90309- struct symsearch arr[] = {
90310+ struct symsearch modarr[] = {
90311 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90312 NOT_GPL_ONLY, false },
90313 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90314@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90315 if (mod->state == MODULE_STATE_UNFORMED)
90316 continue;
90317
90318- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90319+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90320 return true;
90321 }
90322 return false;
90323@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90324 if (!pcpusec->sh_size)
90325 return 0;
90326
90327- if (align > PAGE_SIZE) {
90328+ if (align-1 >= PAGE_SIZE) {
90329 pr_warn("%s: per-cpu alignment %li > %li\n",
90330 mod->name, align, PAGE_SIZE);
90331 align = PAGE_SIZE;
90332@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90333 static ssize_t show_coresize(struct module_attribute *mattr,
90334 struct module_kobject *mk, char *buffer)
90335 {
90336- return sprintf(buffer, "%u\n", mk->mod->core_size);
90337+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90338 }
90339
90340 static struct module_attribute modinfo_coresize =
90341@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90342 static ssize_t show_initsize(struct module_attribute *mattr,
90343 struct module_kobject *mk, char *buffer)
90344 {
90345- return sprintf(buffer, "%u\n", mk->mod->init_size);
90346+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90347 }
90348
90349 static struct module_attribute modinfo_initsize =
90350@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90351 goto bad_version;
90352 }
90353
90354+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90355+ /*
90356+ * avoid potentially printing gibberish on attempted load
90357+ * of a module randomized with a different seed
90358+ */
90359+ pr_warn("no symbol version for %s\n", symname);
90360+#else
90361 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90362+#endif
90363 return 0;
90364
90365 bad_version:
90366+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90367+ /*
90368+ * avoid potentially printing gibberish on attempted load
90369+ * of a module randomized with a different seed
90370+ */
90371+ pr_warn("attempted module disagrees about version of symbol %s\n",
90372+ symname);
90373+#else
90374 pr_warn("%s: disagrees about version of symbol %s\n",
90375 mod->name, symname);
90376+#endif
90377 return 0;
90378 }
90379
90380@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90381 */
90382 #ifdef CONFIG_SYSFS
90383
90384-#ifdef CONFIG_KALLSYMS
90385+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90386 static inline bool sect_empty(const Elf_Shdr *sect)
90387 {
90388 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90389@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90390 {
90391 unsigned int notes, loaded, i;
90392 struct module_notes_attrs *notes_attrs;
90393- struct bin_attribute *nattr;
90394+ bin_attribute_no_const *nattr;
90395
90396 /* failed to create section attributes, so can't create notes */
90397 if (!mod->sect_attrs)
90398@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90399 static int module_add_modinfo_attrs(struct module *mod)
90400 {
90401 struct module_attribute *attr;
90402- struct module_attribute *temp_attr;
90403+ module_attribute_no_const *temp_attr;
90404 int error = 0;
90405 int i;
90406
90407@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90408
90409 static void unset_module_core_ro_nx(struct module *mod)
90410 {
90411- set_page_attributes(mod->module_core + mod->core_text_size,
90412- mod->module_core + mod->core_size,
90413+ set_page_attributes(mod->module_core_rw,
90414+ mod->module_core_rw + mod->core_size_rw,
90415 set_memory_x);
90416- set_page_attributes(mod->module_core,
90417- mod->module_core + mod->core_ro_size,
90418+ set_page_attributes(mod->module_core_rx,
90419+ mod->module_core_rx + mod->core_size_rx,
90420 set_memory_rw);
90421 }
90422
90423 static void unset_module_init_ro_nx(struct module *mod)
90424 {
90425- set_page_attributes(mod->module_init + mod->init_text_size,
90426- mod->module_init + mod->init_size,
90427+ set_page_attributes(mod->module_init_rw,
90428+ mod->module_init_rw + mod->init_size_rw,
90429 set_memory_x);
90430- set_page_attributes(mod->module_init,
90431- mod->module_init + mod->init_ro_size,
90432+ set_page_attributes(mod->module_init_rx,
90433+ mod->module_init_rx + mod->init_size_rx,
90434 set_memory_rw);
90435 }
90436
90437@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90438 list_for_each_entry_rcu(mod, &modules, list) {
90439 if (mod->state == MODULE_STATE_UNFORMED)
90440 continue;
90441- if ((mod->module_core) && (mod->core_text_size)) {
90442- set_page_attributes(mod->module_core,
90443- mod->module_core + mod->core_text_size,
90444+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90445+ set_page_attributes(mod->module_core_rx,
90446+ mod->module_core_rx + mod->core_size_rx,
90447 set_memory_rw);
90448 }
90449- if ((mod->module_init) && (mod->init_text_size)) {
90450- set_page_attributes(mod->module_init,
90451- mod->module_init + mod->init_text_size,
90452+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90453+ set_page_attributes(mod->module_init_rx,
90454+ mod->module_init_rx + mod->init_size_rx,
90455 set_memory_rw);
90456 }
90457 }
90458@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90459 list_for_each_entry_rcu(mod, &modules, list) {
90460 if (mod->state == MODULE_STATE_UNFORMED)
90461 continue;
90462- if ((mod->module_core) && (mod->core_text_size)) {
90463- set_page_attributes(mod->module_core,
90464- mod->module_core + mod->core_text_size,
90465+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90466+ set_page_attributes(mod->module_core_rx,
90467+ mod->module_core_rx + mod->core_size_rx,
90468 set_memory_ro);
90469 }
90470- if ((mod->module_init) && (mod->init_text_size)) {
90471- set_page_attributes(mod->module_init,
90472- mod->module_init + mod->init_text_size,
90473+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90474+ set_page_attributes(mod->module_init_rx,
90475+ mod->module_init_rx + mod->init_size_rx,
90476 set_memory_ro);
90477 }
90478 }
90479@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90480 #else
90481 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90482 static void unset_module_core_ro_nx(struct module *mod) { }
90483-static void unset_module_init_ro_nx(struct module *mod) { }
90484+static void unset_module_init_ro_nx(struct module *mod)
90485+{
90486+
90487+#ifdef CONFIG_PAX_KERNEXEC
90488+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90489+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90490+#endif
90491+
90492+}
90493 #endif
90494
90495 void __weak module_memfree(void *module_region)
90496@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90497 /* This may be NULL, but that's OK */
90498 unset_module_init_ro_nx(mod);
90499 module_arch_freeing_init(mod);
90500- module_memfree(mod->module_init);
90501+ module_memfree(mod->module_init_rw);
90502+ module_memfree_exec(mod->module_init_rx);
90503 kfree(mod->args);
90504 percpu_modfree(mod);
90505
90506 /* Free lock-classes: */
90507- lockdep_free_key_range(mod->module_core, mod->core_size);
90508+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90509+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90510
90511 /* Finally, free the core (containing the module structure) */
90512 unset_module_core_ro_nx(mod);
90513- module_memfree(mod->module_core);
90514+ module_memfree_exec(mod->module_core_rx);
90515+ module_memfree(mod->module_core_rw);
90516
90517 #ifdef CONFIG_MPU
90518 update_protections(current->mm);
90519@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90520 int ret = 0;
90521 const struct kernel_symbol *ksym;
90522
90523+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90524+ int is_fs_load = 0;
90525+ int register_filesystem_found = 0;
90526+ char *p;
90527+
90528+ p = strstr(mod->args, "grsec_modharden_fs");
90529+ if (p) {
90530+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90531+ /* copy \0 as well */
90532+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90533+ is_fs_load = 1;
90534+ }
90535+#endif
90536+
90537 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90538 const char *name = info->strtab + sym[i].st_name;
90539
90540+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90541+ /* it's a real shame this will never get ripped and copied
90542+ upstream! ;(
90543+ */
90544+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90545+ register_filesystem_found = 1;
90546+#endif
90547+
90548 switch (sym[i].st_shndx) {
90549 case SHN_COMMON:
90550 /* Ignore common symbols */
90551@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90552 ksym = resolve_symbol_wait(mod, info, name);
90553 /* Ok if resolved. */
90554 if (ksym && !IS_ERR(ksym)) {
90555+ pax_open_kernel();
90556 sym[i].st_value = ksym->value;
90557+ pax_close_kernel();
90558 break;
90559 }
90560
90561@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90562 secbase = (unsigned long)mod_percpu(mod);
90563 else
90564 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90565+ pax_open_kernel();
90566 sym[i].st_value += secbase;
90567+ pax_close_kernel();
90568 break;
90569 }
90570 }
90571
90572+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90573+ if (is_fs_load && !register_filesystem_found) {
90574+ printk(KERN_ALERT "grsec: denied attempt to load non-fs module %.64s through mount\n", mod->name);
90575+ ret = -EPERM;
90576+ }
90577+#endif
90578+
90579 return ret;
90580 }
90581
90582@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90583 || s->sh_entsize != ~0UL
90584 || strstarts(sname, ".init"))
90585 continue;
90586- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90587+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90588+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90589+ else
90590+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90591 pr_debug("\t%s\n", sname);
90592 }
90593- switch (m) {
90594- case 0: /* executable */
90595- mod->core_size = debug_align(mod->core_size);
90596- mod->core_text_size = mod->core_size;
90597- break;
90598- case 1: /* RO: text and ro-data */
90599- mod->core_size = debug_align(mod->core_size);
90600- mod->core_ro_size = mod->core_size;
90601- break;
90602- case 3: /* whole core */
90603- mod->core_size = debug_align(mod->core_size);
90604- break;
90605- }
90606 }
90607
90608 pr_debug("Init section allocation order:\n");
90609@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90610 || s->sh_entsize != ~0UL
90611 || !strstarts(sname, ".init"))
90612 continue;
90613- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90614- | INIT_OFFSET_MASK);
90615+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90616+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90617+ else
90618+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90619+ s->sh_entsize |= INIT_OFFSET_MASK;
90620 pr_debug("\t%s\n", sname);
90621 }
90622- switch (m) {
90623- case 0: /* executable */
90624- mod->init_size = debug_align(mod->init_size);
90625- mod->init_text_size = mod->init_size;
90626- break;
90627- case 1: /* RO: text and ro-data */
90628- mod->init_size = debug_align(mod->init_size);
90629- mod->init_ro_size = mod->init_size;
90630- break;
90631- case 3: /* whole init */
90632- mod->init_size = debug_align(mod->init_size);
90633- break;
90634- }
90635 }
90636 }
90637
90638@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90639
90640 /* Put symbol section at end of init part of module. */
90641 symsect->sh_flags |= SHF_ALLOC;
90642- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90643+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90644 info->index.sym) | INIT_OFFSET_MASK;
90645 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90646
90647@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90648 }
90649
90650 /* Append room for core symbols at end of core part. */
90651- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90652- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90653- mod->core_size += strtab_size;
90654+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90655+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90656+ mod->core_size_rx += strtab_size;
90657
90658 /* Put string table section at end of init part of module. */
90659 strsect->sh_flags |= SHF_ALLOC;
90660- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90661+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90662 info->index.str) | INIT_OFFSET_MASK;
90663 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90664 }
90665@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90666 /* Make sure we get permanent strtab: don't use info->strtab. */
90667 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90668
90669+ pax_open_kernel();
90670+
90671 /* Set types up while we still have access to sections. */
90672 for (i = 0; i < mod->num_symtab; i++)
90673 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90674
90675- mod->core_symtab = dst = mod->module_core + info->symoffs;
90676- mod->core_strtab = s = mod->module_core + info->stroffs;
90677+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90678+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90679 src = mod->symtab;
90680 for (ndst = i = 0; i < mod->num_symtab; i++) {
90681 if (i == 0 ||
90682@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90683 }
90684 }
90685 mod->core_num_syms = ndst;
90686+
90687+ pax_close_kernel();
90688 }
90689 #else
90690 static inline void layout_symtab(struct module *mod, struct load_info *info)
90691@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90692 return vmalloc_exec(size);
90693 }
90694
90695-static void *module_alloc_update_bounds(unsigned long size)
90696+static void *module_alloc_update_bounds_rw(unsigned long size)
90697 {
90698 void *ret = module_alloc(size);
90699
90700 if (ret) {
90701 mutex_lock(&module_mutex);
90702 /* Update module bounds. */
90703- if ((unsigned long)ret < module_addr_min)
90704- module_addr_min = (unsigned long)ret;
90705- if ((unsigned long)ret + size > module_addr_max)
90706- module_addr_max = (unsigned long)ret + size;
90707+ if ((unsigned long)ret < module_addr_min_rw)
90708+ module_addr_min_rw = (unsigned long)ret;
90709+ if ((unsigned long)ret + size > module_addr_max_rw)
90710+ module_addr_max_rw = (unsigned long)ret + size;
90711+ mutex_unlock(&module_mutex);
90712+ }
90713+ return ret;
90714+}
90715+
90716+static void *module_alloc_update_bounds_rx(unsigned long size)
90717+{
90718+ void *ret = module_alloc_exec(size);
90719+
90720+ if (ret) {
90721+ mutex_lock(&module_mutex);
90722+ /* Update module bounds. */
90723+ if ((unsigned long)ret < module_addr_min_rx)
90724+ module_addr_min_rx = (unsigned long)ret;
90725+ if ((unsigned long)ret + size > module_addr_max_rx)
90726+ module_addr_max_rx = (unsigned long)ret + size;
90727 mutex_unlock(&module_mutex);
90728 }
90729 return ret;
90730@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90731 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90732
90733 if (info->index.sym == 0) {
90734+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90735+ /*
90736+ * avoid potentially printing gibberish on attempted load
90737+ * of a module randomized with a different seed
90738+ */
90739+ pr_warn("module has no symbols (stripped?)\n");
90740+#else
90741 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90742+#endif
90743 return ERR_PTR(-ENOEXEC);
90744 }
90745
90746@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90747 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90748 {
90749 const char *modmagic = get_modinfo(info, "vermagic");
90750+ const char *license = get_modinfo(info, "license");
90751 int err;
90752
90753+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90754+ if (!license || !license_is_gpl_compatible(license))
90755+ return -ENOEXEC;
90756+#endif
90757+
90758 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90759 modmagic = NULL;
90760
90761@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90762 }
90763
90764 /* Set up license info based on the info section */
90765- set_license(mod, get_modinfo(info, "license"));
90766+ set_license(mod, license);
90767
90768 return 0;
90769 }
90770@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90771 void *ptr;
90772
90773 /* Do the allocs. */
90774- ptr = module_alloc_update_bounds(mod->core_size);
90775+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90776 /*
90777 * The pointer to this block is stored in the module structure
90778 * which is inside the block. Just mark it as not being a
90779@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90780 if (!ptr)
90781 return -ENOMEM;
90782
90783- memset(ptr, 0, mod->core_size);
90784- mod->module_core = ptr;
90785+ memset(ptr, 0, mod->core_size_rw);
90786+ mod->module_core_rw = ptr;
90787
90788- if (mod->init_size) {
90789- ptr = module_alloc_update_bounds(mod->init_size);
90790+ if (mod->init_size_rw) {
90791+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90792 /*
90793 * The pointer to this block is stored in the module structure
90794 * which is inside the block. This block doesn't need to be
90795@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90796 */
90797 kmemleak_ignore(ptr);
90798 if (!ptr) {
90799- module_memfree(mod->module_core);
90800+ module_memfree(mod->module_core_rw);
90801 return -ENOMEM;
90802 }
90803- memset(ptr, 0, mod->init_size);
90804- mod->module_init = ptr;
90805+ memset(ptr, 0, mod->init_size_rw);
90806+ mod->module_init_rw = ptr;
90807 } else
90808- mod->module_init = NULL;
90809+ mod->module_init_rw = NULL;
90810+
90811+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90812+ kmemleak_not_leak(ptr);
90813+ if (!ptr) {
90814+ if (mod->module_init_rw)
90815+ module_memfree(mod->module_init_rw);
90816+ module_memfree(mod->module_core_rw);
90817+ return -ENOMEM;
90818+ }
90819+
90820+ pax_open_kernel();
90821+ memset(ptr, 0, mod->core_size_rx);
90822+ pax_close_kernel();
90823+ mod->module_core_rx = ptr;
90824+
90825+ if (mod->init_size_rx) {
90826+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90827+ kmemleak_ignore(ptr);
90828+ if (!ptr && mod->init_size_rx) {
90829+ module_memfree_exec(mod->module_core_rx);
90830+ if (mod->module_init_rw)
90831+ module_memfree(mod->module_init_rw);
90832+ module_memfree(mod->module_core_rw);
90833+ return -ENOMEM;
90834+ }
90835+
90836+ pax_open_kernel();
90837+ memset(ptr, 0, mod->init_size_rx);
90838+ pax_close_kernel();
90839+ mod->module_init_rx = ptr;
90840+ } else
90841+ mod->module_init_rx = NULL;
90842
90843 /* Transfer each section which specifies SHF_ALLOC */
90844 pr_debug("final section addresses:\n");
90845@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90846 if (!(shdr->sh_flags & SHF_ALLOC))
90847 continue;
90848
90849- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90850- dest = mod->module_init
90851- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90852- else
90853- dest = mod->module_core + shdr->sh_entsize;
90854+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90855+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90856+ dest = mod->module_init_rw
90857+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90858+ else
90859+ dest = mod->module_init_rx
90860+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90861+ } else {
90862+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90863+ dest = mod->module_core_rw + shdr->sh_entsize;
90864+ else
90865+ dest = mod->module_core_rx + shdr->sh_entsize;
90866+ }
90867+
90868+ if (shdr->sh_type != SHT_NOBITS) {
90869+
90870+#ifdef CONFIG_PAX_KERNEXEC
90871+#ifdef CONFIG_X86_64
90872+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90873+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90874+#endif
90875+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90876+ pax_open_kernel();
90877+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90878+ pax_close_kernel();
90879+ } else
90880+#endif
90881
90882- if (shdr->sh_type != SHT_NOBITS)
90883 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90884+ }
90885 /* Update sh_addr to point to copy in image. */
90886- shdr->sh_addr = (unsigned long)dest;
90887+
90888+#ifdef CONFIG_PAX_KERNEXEC
90889+ if (shdr->sh_flags & SHF_EXECINSTR)
90890+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90891+ else
90892+#endif
90893+
90894+ shdr->sh_addr = (unsigned long)dest;
90895 pr_debug("\t0x%lx %s\n",
90896 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90897 }
90898@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90899 * Do it before processing of module parameters, so the module
90900 * can provide parameter accessor functions of its own.
90901 */
90902- if (mod->module_init)
90903- flush_icache_range((unsigned long)mod->module_init,
90904- (unsigned long)mod->module_init
90905- + mod->init_size);
90906- flush_icache_range((unsigned long)mod->module_core,
90907- (unsigned long)mod->module_core + mod->core_size);
90908+ if (mod->module_init_rx)
90909+ flush_icache_range((unsigned long)mod->module_init_rx,
90910+ (unsigned long)mod->module_init_rx
90911+ + mod->init_size_rx);
90912+ flush_icache_range((unsigned long)mod->module_core_rx,
90913+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90914
90915 set_fs(old_fs);
90916 }
90917@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
90918 {
90919 percpu_modfree(mod);
90920 module_arch_freeing_init(mod);
90921- module_memfree(mod->module_init);
90922- module_memfree(mod->module_core);
90923+ module_memfree_exec(mod->module_init_rx);
90924+ module_memfree_exec(mod->module_core_rx);
90925+ module_memfree(mod->module_init_rw);
90926+ module_memfree(mod->module_core_rw);
90927 }
90928
90929 int __weak module_finalize(const Elf_Ehdr *hdr,
90930@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
90931 static int post_relocation(struct module *mod, const struct load_info *info)
90932 {
90933 /* Sort exception table now relocations are done. */
90934+ pax_open_kernel();
90935 sort_extable(mod->extable, mod->extable + mod->num_exentries);
90936+ pax_close_kernel();
90937
90938 /* Copy relocated percpu area over. */
90939 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
90940@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
90941 /* For freeing module_init on success, in case kallsyms traversing */
90942 struct mod_initfree {
90943 struct rcu_head rcu;
90944- void *module_init;
90945+ void *module_init_rw;
90946+ void *module_init_rx;
90947 };
90948
90949 static void do_free_init(struct rcu_head *head)
90950 {
90951 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
90952- module_memfree(m->module_init);
90953+ module_memfree(m->module_init_rw);
90954+ module_memfree_exec(m->module_init_rx);
90955 kfree(m);
90956 }
90957
90958@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
90959 ret = -ENOMEM;
90960 goto fail;
90961 }
90962- freeinit->module_init = mod->module_init;
90963+ freeinit->module_init_rw = mod->module_init_rw;
90964+ freeinit->module_init_rx = mod->module_init_rx;
90965
90966 /*
90967 * We want to find out whether @mod uses async during init. Clear
90968@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
90969 #endif
90970 unset_module_init_ro_nx(mod);
90971 module_arch_freeing_init(mod);
90972- mod->module_init = NULL;
90973- mod->init_size = 0;
90974- mod->init_ro_size = 0;
90975- mod->init_text_size = 0;
90976+ mod->module_init_rw = NULL;
90977+ mod->module_init_rx = NULL;
90978+ mod->init_size_rw = 0;
90979+ mod->init_size_rx = 0;
90980 /*
90981 * We want to free module_init, but be aware that kallsyms may be
90982 * walking this with preempt disabled. In all the failure paths,
90983@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
90984 module_bug_finalize(info->hdr, info->sechdrs, mod);
90985
90986 /* Set RO and NX regions for core */
90987- set_section_ro_nx(mod->module_core,
90988- mod->core_text_size,
90989- mod->core_ro_size,
90990- mod->core_size);
90991+ set_section_ro_nx(mod->module_core_rx,
90992+ mod->core_size_rx,
90993+ mod->core_size_rx,
90994+ mod->core_size_rx);
90995
90996 /* Set RO and NX regions for init */
90997- set_section_ro_nx(mod->module_init,
90998- mod->init_text_size,
90999- mod->init_ro_size,
91000- mod->init_size);
91001+ set_section_ro_nx(mod->module_init_rx,
91002+ mod->init_size_rx,
91003+ mod->init_size_rx,
91004+ mod->init_size_rx);
91005
91006 /* Mark state as coming so strong_try_module_get() ignores us,
91007 * but kallsyms etc. can see us. */
91008@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91009 if (err)
91010 goto free_unload;
91011
91012+ /* Now copy in args */
91013+ mod->args = strndup_user(uargs, ~0UL >> 1);
91014+ if (IS_ERR(mod->args)) {
91015+ err = PTR_ERR(mod->args);
91016+ goto free_unload;
91017+ }
91018+
91019 /* Set up MODINFO_ATTR fields */
91020 setup_modinfo(mod, info);
91021
91022+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91023+ {
91024+ char *p, *p2;
91025+
91026+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91027+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
91028+ err = -EPERM;
91029+ goto free_modinfo;
91030+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91031+ p += sizeof("grsec_modharden_normal") - 1;
91032+ p2 = strstr(p, "_");
91033+ if (p2) {
91034+ *p2 = '\0';
91035+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91036+ *p2 = '_';
91037+ }
91038+ err = -EPERM;
91039+ goto free_modinfo;
91040+ }
91041+ }
91042+#endif
91043+
91044 /* Fix up syms, so that st_value is a pointer to location. */
91045 err = simplify_symbols(mod, info);
91046 if (err < 0)
91047@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91048
91049 flush_module_icache(mod);
91050
91051- /* Now copy in args */
91052- mod->args = strndup_user(uargs, ~0UL >> 1);
91053- if (IS_ERR(mod->args)) {
91054- err = PTR_ERR(mod->args);
91055- goto free_arch_cleanup;
91056- }
91057-
91058 dynamic_debug_setup(info->debug, info->num_debug);
91059
91060 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91061@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91062 ddebug_cleanup:
91063 dynamic_debug_remove(info->debug);
91064 synchronize_sched();
91065- kfree(mod->args);
91066- free_arch_cleanup:
91067 module_arch_cleanup(mod);
91068 free_modinfo:
91069 free_modinfo(mod);
91070+ kfree(mod->args);
91071 free_unload:
91072 module_unload_free(mod);
91073 unlink_mod:
91074@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
91075 unsigned long nextval;
91076
91077 /* At worse, next value is at end of module */
91078- if (within_module_init(addr, mod))
91079- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91080+ if (within_module_init_rx(addr, mod))
91081+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91082+ else if (within_module_init_rw(addr, mod))
91083+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91084+ else if (within_module_core_rx(addr, mod))
91085+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91086+ else if (within_module_core_rw(addr, mod))
91087+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91088 else
91089- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91090+ return NULL;
91091
91092 /* Scan for closest preceding symbol, and next symbol. (ELF
91093 starts real symbols at 1). */
91094@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
91095 return 0;
91096
91097 seq_printf(m, "%s %u",
91098- mod->name, mod->init_size + mod->core_size);
91099+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91100 print_unload_info(m, mod);
91101
91102 /* Informative for users. */
91103@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
91104 mod->state == MODULE_STATE_COMING ? "Loading" :
91105 "Live");
91106 /* Used by oprofile and other similar tools. */
91107- seq_printf(m, " 0x%pK", mod->module_core);
91108+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91109
91110 /* Taints info */
91111 if (mod->taints)
91112@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
91113
91114 static int __init proc_modules_init(void)
91115 {
91116+#ifndef CONFIG_GRKERNSEC_HIDESYM
91117+#ifdef CONFIG_GRKERNSEC_PROC_USER
91118+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91119+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91120+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91121+#else
91122 proc_create("modules", 0, NULL, &proc_modules_operations);
91123+#endif
91124+#else
91125+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91126+#endif
91127 return 0;
91128 }
91129 module_init(proc_modules_init);
91130@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91131 {
91132 struct module *mod;
91133
91134- if (addr < module_addr_min || addr > module_addr_max)
91135+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91136+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91137 return NULL;
91138
91139 list_for_each_entry_rcu(mod, &modules, list) {
91140@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91141 */
91142 struct module *__module_text_address(unsigned long addr)
91143 {
91144- struct module *mod = __module_address(addr);
91145+ struct module *mod;
91146+
91147+#ifdef CONFIG_X86_32
91148+ addr = ktla_ktva(addr);
91149+#endif
91150+
91151+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91152+ return NULL;
91153+
91154+ mod = __module_address(addr);
91155+
91156 if (mod) {
91157 /* Make sure it's within the text section. */
91158- if (!within(addr, mod->module_init, mod->init_text_size)
91159- && !within(addr, mod->module_core, mod->core_text_size))
91160+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91161 mod = NULL;
91162 }
91163 return mod;
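
One rule ties the module.c hunks together: every ELF section is steered into either a writable (RW) or an executable (RX) allocation, replacing the single module_core/module_init blobs, so no module mapping is ever writable and executable at the same time. That classification rule, isolated into a sketch (field names match the hunks above; the helper name is illustrative):

    /* writable or non-allocated sections accumulate in the RW region;
     * text and rodata accumulate in the RX region */
    static unsigned int *core_size_for(struct module *mod, const Elf_Shdr *s)
    {
            if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
                    return &mod->core_size_rw;  /* .data, .bss */
            return &mod->core_size_rx;          /* .text, .rodata */
    }

The same test reappears in layout_sections() and move_module() above, and the RX side is only ever touched inside pax_open_kernel()/pax_close_kernel() windows or allocated via module_alloc_exec().
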
91164diff --git a/kernel/notifier.c b/kernel/notifier.c
91165index 4803da6..1c5eea6 100644
91166--- a/kernel/notifier.c
91167+++ b/kernel/notifier.c
91168@@ -5,6 +5,7 @@
91169 #include <linux/rcupdate.h>
91170 #include <linux/vmalloc.h>
91171 #include <linux/reboot.h>
91172+#include <linux/mm.h>
91173
91174 /*
91175 * Notifier list for kernel code which wants to be called
91176@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91177 while ((*nl) != NULL) {
91178 if (n->priority > (*nl)->priority)
91179 break;
91180- nl = &((*nl)->next);
91181+ nl = (struct notifier_block **)&((*nl)->next);
91182 }
91183- n->next = *nl;
91184+ pax_open_kernel();
91185+ *(const void **)&n->next = *nl;
91186 rcu_assign_pointer(*nl, n);
91187+ pax_close_kernel();
91188 return 0;
91189 }
91190
91191@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91192 return 0;
91193 if (n->priority > (*nl)->priority)
91194 break;
91195- nl = &((*nl)->next);
91196+ nl = (struct notifier_block **)&((*nl)->next);
91197 }
91198- n->next = *nl;
91199+ pax_open_kernel();
91200+ *(const void **)&n->next = *nl;
91201 rcu_assign_pointer(*nl, n);
91202+ pax_close_kernel();
91203 return 0;
91204 }
91205
91206@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91207 {
91208 while ((*nl) != NULL) {
91209 if ((*nl) == n) {
91210+ pax_open_kernel();
91211 rcu_assign_pointer(*nl, n->next);
91212+ pax_close_kernel();
91213 return 0;
91214 }
91215- nl = &((*nl)->next);
91216+ nl = (struct notifier_block **)&((*nl)->next);
91217 }
91218 return -ENOENT;
91219 }
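
The notifier chains are constified under this patch, so the next pointers live in read-only memory and every legitimate update is bracketed by pax_open_kernel()/pax_close_kernel(), which on x86 briefly clears CR0.WP to allow the store. The bare pattern, as a sketch assuming a constified function-pointer slot:

    static void install_hook(const void **slot, const void *fn)
    {
            pax_open_kernel();   /* open a short write window to read-only data */
            *slot = fn;
            pax_close_kernel();  /* restore write protection immediately */
    }
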
91220diff --git a/kernel/padata.c b/kernel/padata.c
91221index 161402f..598814c 100644
91222--- a/kernel/padata.c
91223+++ b/kernel/padata.c
91224@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91225 * seq_nr mod. number of cpus in use.
91226 */
91227
91228- seq_nr = atomic_inc_return(&pd->seq_nr);
91229+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91230 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91231
91232 return padata_index_to_cpu(pd, cpu_index);
91233@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91234 padata_init_pqueues(pd);
91235 padata_init_squeues(pd);
91236 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91237- atomic_set(&pd->seq_nr, -1);
91238+ atomic_set_unchecked(&pd->seq_nr, -1);
91239 atomic_set(&pd->reorder_objects, 0);
91240 atomic_set(&pd->refcnt, 0);
91241 pd->pinst = pinst;
91242diff --git a/kernel/panic.c b/kernel/panic.c
91243index 4d8d6f9..97b9b9c 100644
91244--- a/kernel/panic.c
91245+++ b/kernel/panic.c
91246@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91247 /*
91248 * Stop ourself in panic -- architecture code may override this
91249 */
91250-void __weak panic_smp_self_stop(void)
91251+void __weak __noreturn panic_smp_self_stop(void)
91252 {
91253 while (1)
91254 cpu_relax();
91255@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91256 disable_trace_on_warning();
91257
91258 pr_warn("------------[ cut here ]------------\n");
91259- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91260+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91261 raw_smp_processor_id(), current->pid, file, line, caller);
91262
91263 if (args)
91264@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91265 */
91266 __visible void __stack_chk_fail(void)
91267 {
91268- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91269+ dump_stack();
91270+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91271 __builtin_return_address(0));
91272 }
91273 EXPORT_SYMBOL(__stack_chk_fail);
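
Two small touches in panic.c: panic_smp_self_stop() is annotated __noreturn so the compiler knows control never resumes past it, and __stack_chk_fail() now dumps a stack trace before panicking so a stack-smash report identifies its caller. A standalone illustration of what the noreturn annotation buys, in plain C11:

    #include <stdnoreturn.h>   /* the kernel spells this __noreturn */

    static noreturn void self_stop(void)
    {
            for (;;)
                    ;   /* spin forever, like panic_smp_self_stop() */
    }

    int checked(int x)
    {
            if (x < 0)
                    self_stop();
            return x;   /* no missing-return warning on the x < 0 path */
    }
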
91274diff --git a/kernel/pid.c b/kernel/pid.c
91275index cd36a5e..11f185d 100644
91276--- a/kernel/pid.c
91277+++ b/kernel/pid.c
91278@@ -33,6 +33,7 @@
91279 #include <linux/rculist.h>
91280 #include <linux/bootmem.h>
91281 #include <linux/hash.h>
91282+#include <linux/security.h>
91283 #include <linux/pid_namespace.h>
91284 #include <linux/init_task.h>
91285 #include <linux/syscalls.h>
91286@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91287
91288 int pid_max = PID_MAX_DEFAULT;
91289
91290-#define RESERVED_PIDS 300
91291+#define RESERVED_PIDS 500
91292
91293 int pid_max_min = RESERVED_PIDS + 1;
91294 int pid_max_max = PID_MAX_LIMIT;
91295@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91296 */
91297 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91298 {
91299+ struct task_struct *task;
91300+
91301 rcu_lockdep_assert(rcu_read_lock_held(),
91302 "find_task_by_pid_ns() needs rcu_read_lock()"
91303 " protection");
91304- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91305+
91306+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91307+
91308+ if (gr_pid_is_chrooted(task))
91309+ return NULL;
91310+
91311+ return task;
91312 }
91313
91314 struct task_struct *find_task_by_vpid(pid_t vnr)
91315@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91316 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91317 }
91318
91319+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91320+{
91321+ rcu_lockdep_assert(rcu_read_lock_held(),
91322+ "find_task_by_pid_ns() needs rcu_read_lock()"
91323+ " protection");
91324+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91325+}
91326+
91327 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91328 {
91329 struct pid *pid;
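
With the pid.c hunks applied, find_task_by_pid_ns() filters its result through gr_pid_is_chrooted(), so a chrooted process cannot even resolve PIDs outside its jail, while find_task_by_vpid_unrestricted() keeps an unfiltered lookup for kernel-internal paths that must see every task. The contrast from a caller's point of view (fragment; assumes the declarations above and rcu_read_lock() held, as the lockdep assertions require):

    rcu_read_lock();
    /* chroot-filtered: NULL for tasks this caller is not allowed to see */
    visible = find_task_by_vpid(nr);
    /* unfiltered: for internal paths that must always resolve the task */
    any = find_task_by_vpid_unrestricted(nr);
    rcu_read_unlock();
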
91330diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91331index a65ba13..f600dbb 100644
91332--- a/kernel/pid_namespace.c
91333+++ b/kernel/pid_namespace.c
91334@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91335 void __user *buffer, size_t *lenp, loff_t *ppos)
91336 {
91337 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91338- struct ctl_table tmp = *table;
91339+ ctl_table_no_const tmp = *table;
91340
91341 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91342 return -EPERM;
91343diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91344index 48b28d3..c63ccaf 100644
91345--- a/kernel/power/Kconfig
91346+++ b/kernel/power/Kconfig
91347@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91348 config HIBERNATION
91349 bool "Hibernation (aka 'suspend to disk')"
91350 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91351+ depends on !GRKERNSEC_KMEM
91352+ depends on !PAX_MEMORY_SANITIZE
91353 select HIBERNATE_CALLBACKS
91354 select LZO_COMPRESS
91355 select LZO_DECOMPRESS
91356diff --git a/kernel/power/process.c b/kernel/power/process.c
91357index 5a6ec86..3a8c884 100644
91358--- a/kernel/power/process.c
91359+++ b/kernel/power/process.c
91360@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91361 unsigned int elapsed_msecs;
91362 bool wakeup = false;
91363 int sleep_usecs = USEC_PER_MSEC;
91364+ bool timedout = false;
91365
91366 do_gettimeofday(&start);
91367
91368@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91369
91370 while (true) {
91371 todo = 0;
91372+ if (time_after(jiffies, end_time))
91373+ timedout = true;
91374 read_lock(&tasklist_lock);
91375 for_each_process_thread(g, p) {
91376 if (p == current || !freeze_task(p))
91377 continue;
91378
91379- if (!freezer_should_skip(p))
91380+ if (!freezer_should_skip(p)) {
91381 todo++;
91382+ if (timedout) {
91383+ printk(KERN_ERR "Task refusing to freeze:\n");
91384+ sched_show_task(p);
91385+ }
91386+ }
91387 }
91388 read_unlock(&tasklist_lock);
91389
91390@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91391 todo += wq_busy;
91392 }
91393
91394- if (!todo || time_after(jiffies, end_time))
91395+ if (!todo || timedout)
91396 break;
91397
91398 if (pm_wakeup_pending()) {
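
The try_to_freeze_tasks() change latches the deadline once at the top of each scan, so the pass that decides to give up is the same pass that names the offenders: any still-busy task seen after the deadline is printed via sched_show_task(). The control-flow pattern in isolation, with all helper names illustrative:

    #include <stdbool.h>

    /* stand-ins for the kernel's freezer state */
    extern bool deadline_passed(void);
    extern int  candidates;
    extern bool still_busy(int task);
    extern void report(int task);

    void freeze_loop(void)
    {
            bool timedout = false;

            for (;;) {
                    int todo = 0;

                    if (deadline_passed())
                            timedout = true;   /* latch once per scan pass */

                    for (int t = 0; t < candidates; t++) {
                            if (still_busy(t)) {
                                    todo++;
                                    if (timedout)
                                            report(t);   /* name the holdout */
                            }
                    }
                    if (!todo || timedout)     /* one flag ends scan and loop */
                            break;
            }
    }
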
91399diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91400index 2cdd353..7df1786 100644
91401--- a/kernel/printk/printk.c
91402+++ b/kernel/printk/printk.c
91403@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91404 if (from_file && type != SYSLOG_ACTION_OPEN)
91405 return 0;
91406
91407+#ifdef CONFIG_GRKERNSEC_DMESG
91408+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91409+ return -EPERM;
91410+#endif
91411+
91412 if (syslog_action_restricted(type)) {
91413 if (capable(CAP_SYSLOG))
91414 return 0;
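
With GRKERNSEC_DMESG active (and the grsec_enable_dmesg sysctl on), check_syslog_permissions() rejects the syslog interfaces for callers lacking CAP_SYSLOG or CAP_SYS_ADMIN before the stock policy even runs, which is what makes plain dmesg fail for unprivileged users. A small userspace probe of that behaviour:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/klog.h>

    int main(void)
    {
            char buf[4096];

            /* 3 == SYSLOG_ACTION_READ_ALL; fails with EPERM for
             * unprivileged callers when the dmesg restriction is on */
            if (klogctl(3, buf, sizeof(buf)) < 0)
                    printf("dmesg denied: %s\n", strerror(errno));
            else
                    printf("dmesg readable\n");
            return 0;
    }
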
91415diff --git a/kernel/profile.c b/kernel/profile.c
91416index 54bf5ba..df6e0a2 100644
91417--- a/kernel/profile.c
91418+++ b/kernel/profile.c
91419@@ -37,7 +37,7 @@ struct profile_hit {
91420 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91421 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91422
91423-static atomic_t *prof_buffer;
91424+static atomic_unchecked_t *prof_buffer;
91425 static unsigned long prof_len, prof_shift;
91426
91427 int prof_on __read_mostly;
91428@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91429 hits[i].pc = 0;
91430 continue;
91431 }
91432- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91433+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91434 hits[i].hits = hits[i].pc = 0;
91435 }
91436 }
91437@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91438 * Add the current hit(s) and flush the write-queue out
91439 * to the global buffer:
91440 */
91441- atomic_add(nr_hits, &prof_buffer[pc]);
91442+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91443 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91444- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91445+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91446 hits[i].pc = hits[i].hits = 0;
91447 }
91448 out:
91449@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91450 {
91451 unsigned long pc;
91452 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91453- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91454+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91455 }
91456 #endif /* !CONFIG_SMP */
91457
91458@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91459 return -EFAULT;
91460 buf++; p++; count--; read++;
91461 }
91462- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91463+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91464 if (copy_to_user(buf, (void *)pnt, count))
91465 return -EFAULT;
91466 read += count;
91467@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91468 }
91469 #endif
91470 profile_discard_flip_buffers();
91471- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91472+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91473 return count;
91474 }
91475
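
The profile.c conversion is the PAX_REFCOUNT split seen throughout this patch: under that feature the stock atomic_t traps on overflow, so counters where wraparound is harmless — pure statistics like the prof_buffer hit counts — move to atomic_unchecked_t. A rough userspace model, with the trap played by an assert and both types as stand-ins:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

typedef struct { unsigned int v; } atomic_t;            /* stand-in */
typedef struct { unsigned int v; } atomic_unchecked_t;  /* stand-in */

static void atomic_inc(atomic_t *a)
{
	assert(a->v != UINT_MAX && "refcount overflow");    /* models the PaX trap */
	a->v++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
	a->v++;                                             /* silent wrap is fine here */
}

int main(void)
{
	atomic_unchecked_t hits = { UINT_MAX };

	atomic_inc_unchecked(&hits);                        /* wraps to 0, no trap */
	printf("hits=%u\n", hits.v);
	(void)atomic_inc;                                   /* checked variant kept for contrast */
	return 0;
}
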
91476diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91477index 1eb9d90..d40d21e 100644
91478--- a/kernel/ptrace.c
91479+++ b/kernel/ptrace.c
91480@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91481 if (seize)
91482 flags |= PT_SEIZED;
91483 rcu_read_lock();
91484- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91485+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91486 flags |= PT_PTRACE_CAP;
91487 rcu_read_unlock();
91488 task->ptrace = flags;
91489@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91490 break;
91491 return -EIO;
91492 }
91493- if (copy_to_user(dst, buf, retval))
91494+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91495 return -EFAULT;
91496 copied += retval;
91497 src += retval;
91498@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91499 bool seized = child->ptrace & PT_SEIZED;
91500 int ret = -EIO;
91501 siginfo_t siginfo, *si;
91502- void __user *datavp = (void __user *) data;
91503+ void __user *datavp = (__force void __user *) data;
91504 unsigned long __user *datalp = datavp;
91505 unsigned long flags;
91506
91507@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91508 goto out;
91509 }
91510
91511+ if (gr_handle_ptrace(child, request)) {
91512+ ret = -EPERM;
91513+ goto out_put_task_struct;
91514+ }
91515+
91516 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91517 ret = ptrace_attach(child, request, addr, data);
91518 /*
91519 * Some architectures need to do book-keeping after
91520 * a ptrace attach.
91521 */
91522- if (!ret)
91523+ if (!ret) {
91524 arch_ptrace_attach(child);
91525+ gr_audit_ptrace(child);
91526+ }
91527 goto out_put_task_struct;
91528 }
91529
91530@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91531 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91532 if (copied != sizeof(tmp))
91533 return -EIO;
91534- return put_user(tmp, (unsigned long __user *)data);
91535+ return put_user(tmp, (__force unsigned long __user *)data);
91536 }
91537
91538 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91539@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91540 }
91541
91542 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91543- compat_long_t, addr, compat_long_t, data)
91544+ compat_ulong_t, addr, compat_ulong_t, data)
91545 {
91546 struct task_struct *child;
91547 long ret;
91548@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91549 goto out;
91550 }
91551
91552+ if (gr_handle_ptrace(child, request)) {
91553+ ret = -EPERM;
91554+ goto out_put_task_struct;
91555+ }
91556+
91557 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91558 ret = ptrace_attach(child, request, addr, data);
91559 /*
91560 * Some architectures need to do book-keeping after
91561 * a ptrace attach.
91562 */
91563- if (!ret)
91564+ if (!ret) {
91565 arch_ptrace_attach(child);
91566+ gr_audit_ptrace(child);
91567+ }
91568 goto out_put_task_struct;
91569 }
91570
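
One hunk above hardens ptrace_readdata(): the length a helper reports is checked against the fixed stack buffer before copy_to_user() runs. A self-contained model of that belt-and-braces bound, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

static int read_chunk(char *buf, size_t cap)
{
	const char data[] = "abc";
	size_t n = sizeof(data) < cap ? sizeof(data) : cap;

	memcpy(buf, data, n);
	return (int)n;                      /* helper-reported length */
}

int main(void)
{
	char buf[128], out[128];
	int retval = read_chunk(buf, sizeof(buf));

	if (retval < 0 || (size_t)retval > sizeof(buf))
		return 1;                   /* reject before the copy-out */
	memcpy(out, buf, (size_t)retval);   /* stands in for copy_to_user() */
	printf("copied %d bytes\n", retval);
	return 0;
}
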
91571diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91572index 4d559ba..053da37 100644
91573--- a/kernel/rcu/rcutorture.c
91574+++ b/kernel/rcu/rcutorture.c
91575@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91576 rcu_torture_count) = { 0 };
91577 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91578 rcu_torture_batch) = { 0 };
91579-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91580-static atomic_t n_rcu_torture_alloc;
91581-static atomic_t n_rcu_torture_alloc_fail;
91582-static atomic_t n_rcu_torture_free;
91583-static atomic_t n_rcu_torture_mberror;
91584-static atomic_t n_rcu_torture_error;
91585+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91586+static atomic_unchecked_t n_rcu_torture_alloc;
91587+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91588+static atomic_unchecked_t n_rcu_torture_free;
91589+static atomic_unchecked_t n_rcu_torture_mberror;
91590+static atomic_unchecked_t n_rcu_torture_error;
91591 static long n_rcu_torture_barrier_error;
91592 static long n_rcu_torture_boost_ktrerror;
91593 static long n_rcu_torture_boost_rterror;
91594@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91595 static long n_rcu_torture_timers;
91596 static long n_barrier_attempts;
91597 static long n_barrier_successes;
91598-static atomic_long_t n_cbfloods;
91599+static atomic_long_unchecked_t n_cbfloods;
91600 static struct list_head rcu_torture_removed;
91601
91602 static int rcu_torture_writer_state;
91603@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91604
91605 spin_lock_bh(&rcu_torture_lock);
91606 if (list_empty(&rcu_torture_freelist)) {
91607- atomic_inc(&n_rcu_torture_alloc_fail);
91608+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91609 spin_unlock_bh(&rcu_torture_lock);
91610 return NULL;
91611 }
91612- atomic_inc(&n_rcu_torture_alloc);
91613+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91614 p = rcu_torture_freelist.next;
91615 list_del_init(p);
91616 spin_unlock_bh(&rcu_torture_lock);
91617@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91618 static void
91619 rcu_torture_free(struct rcu_torture *p)
91620 {
91621- atomic_inc(&n_rcu_torture_free);
91622+ atomic_inc_unchecked(&n_rcu_torture_free);
91623 spin_lock_bh(&rcu_torture_lock);
91624 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91625 spin_unlock_bh(&rcu_torture_lock);
91626@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91627 i = rp->rtort_pipe_count;
91628 if (i > RCU_TORTURE_PIPE_LEN)
91629 i = RCU_TORTURE_PIPE_LEN;
91630- atomic_inc(&rcu_torture_wcount[i]);
91631+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91632 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91633 rp->rtort_mbtest = 0;
91634 return true;
91635@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91636 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91637 do {
91638 schedule_timeout_interruptible(cbflood_inter_holdoff);
91639- atomic_long_inc(&n_cbfloods);
91640+ atomic_long_inc_unchecked(&n_cbfloods);
91641 WARN_ON(signal_pending(current));
91642 for (i = 0; i < cbflood_n_burst; i++) {
91643 for (j = 0; j < cbflood_n_per_burst; j++) {
91644@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91645 i = old_rp->rtort_pipe_count;
91646 if (i > RCU_TORTURE_PIPE_LEN)
91647 i = RCU_TORTURE_PIPE_LEN;
91648- atomic_inc(&rcu_torture_wcount[i]);
91649+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91650 old_rp->rtort_pipe_count++;
91651 switch (synctype[torture_random(&rand) % nsynctypes]) {
91652 case RTWS_DEF_FREE:
91653@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91654 return;
91655 }
91656 if (p->rtort_mbtest == 0)
91657- atomic_inc(&n_rcu_torture_mberror);
91658+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91659 spin_lock(&rand_lock);
91660 cur_ops->read_delay(&rand);
91661 n_rcu_torture_timers++;
91662@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91663 continue;
91664 }
91665 if (p->rtort_mbtest == 0)
91666- atomic_inc(&n_rcu_torture_mberror);
91667+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91668 cur_ops->read_delay(&rand);
91669 preempt_disable();
91670 pipe_count = p->rtort_pipe_count;
91671@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91672 rcu_torture_current,
91673 rcu_torture_current_version,
91674 list_empty(&rcu_torture_freelist),
91675- atomic_read(&n_rcu_torture_alloc),
91676- atomic_read(&n_rcu_torture_alloc_fail),
91677- atomic_read(&n_rcu_torture_free));
91678+ atomic_read_unchecked(&n_rcu_torture_alloc),
91679+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91680+ atomic_read_unchecked(&n_rcu_torture_free));
91681 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91682- atomic_read(&n_rcu_torture_mberror),
91683+ atomic_read_unchecked(&n_rcu_torture_mberror),
91684 n_rcu_torture_boost_ktrerror,
91685 n_rcu_torture_boost_rterror);
91686 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91687@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91688 n_barrier_successes,
91689 n_barrier_attempts,
91690 n_rcu_torture_barrier_error);
91691- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91692+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91693
91694 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91695- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91696+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91697 n_rcu_torture_barrier_error != 0 ||
91698 n_rcu_torture_boost_ktrerror != 0 ||
91699 n_rcu_torture_boost_rterror != 0 ||
91700 n_rcu_torture_boost_failure != 0 ||
91701 i > 1) {
91702 pr_cont("%s", "!!! ");
91703- atomic_inc(&n_rcu_torture_error);
91704+ atomic_inc_unchecked(&n_rcu_torture_error);
91705 WARN_ON_ONCE(1);
91706 }
91707 pr_cont("Reader Pipe: ");
91708@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91709 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91710 pr_cont("Free-Block Circulation: ");
91711 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91712- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91713+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91714 }
91715 pr_cont("\n");
91716
91717@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91718
91719 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91720
91721- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91722+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91723 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91724 else if (torture_onoff_failures())
91725 rcu_torture_print_module_parms(cur_ops,
91726@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91727
91728 rcu_torture_current = NULL;
91729 rcu_torture_current_version = 0;
91730- atomic_set(&n_rcu_torture_alloc, 0);
91731- atomic_set(&n_rcu_torture_alloc_fail, 0);
91732- atomic_set(&n_rcu_torture_free, 0);
91733- atomic_set(&n_rcu_torture_mberror, 0);
91734- atomic_set(&n_rcu_torture_error, 0);
91735+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91736+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91737+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91738+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91739+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91740 n_rcu_torture_barrier_error = 0;
91741 n_rcu_torture_boost_ktrerror = 0;
91742 n_rcu_torture_boost_rterror = 0;
91743 n_rcu_torture_boost_failure = 0;
91744 n_rcu_torture_boosts = 0;
91745 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91746- atomic_set(&rcu_torture_wcount[i], 0);
91747+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91748 for_each_possible_cpu(cpu) {
91749 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91750 per_cpu(rcu_torture_count, cpu)[i] = 0;
91751diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91752index 0db5649..e6ec167 100644
91753--- a/kernel/rcu/tiny.c
91754+++ b/kernel/rcu/tiny.c
91755@@ -42,7 +42,7 @@
91756 /* Forward declarations for tiny_plugin.h. */
91757 struct rcu_ctrlblk;
91758 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91759-static void rcu_process_callbacks(struct softirq_action *unused);
91760+static void rcu_process_callbacks(void);
91761 static void __call_rcu(struct rcu_head *head,
91762 void (*func)(struct rcu_head *rcu),
91763 struct rcu_ctrlblk *rcp);
91764@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91765 false));
91766 }
91767
91768-static void rcu_process_callbacks(struct softirq_action *unused)
91769+static __latent_entropy void rcu_process_callbacks(void)
91770 {
91771 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91772 __rcu_process_callbacks(&rcu_bh_ctrlblk);
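
The tiny.c hunk drops the never-used struct softirq_action * argument so the handler matches a plain void(void) dispatch signature, and tags it __latent_entropy — a grsecurity gcc-plugin attribute that, as I understand it, has the plugin mix entropy into a pool each time the function runs. A sketch with the attribute stubbed out:

#include <stdio.h>

#define __latent_entropy                    /* gcc-plugin attribute, no-op here */

typedef void (*softirq_handler)(void);      /* one exact prototype for the table */

static __latent_entropy void rcu_process_callbacks(void)
{
	puts("rcu softirq ran");
}

static softirq_handler softirq_vec[1] = { rcu_process_callbacks };

int main(void)
{
	softirq_vec[0]();                   /* indirect call with matching type */
	return 0;
}
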
91773diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91774index 858c565..7efd915 100644
91775--- a/kernel/rcu/tiny_plugin.h
91776+++ b/kernel/rcu/tiny_plugin.h
91777@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91778 dump_stack();
91779 }
91780 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91781- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91782+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91783 3 * rcu_jiffies_till_stall_check() + 3;
91784 else if (ULONG_CMP_GE(j, js))
91785- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91786+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91787 }
91788
91789 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91790 {
91791 rcp->ticks_this_gp = 0;
91792 rcp->gp_start = jiffies;
91793- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91794+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91795 }
91796
91797 static void check_cpu_stalls(void)
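
tiny_plugin.h shows the ACCESS_ONCE_RW pattern used across this patch: reads keep the read-only ACCESS_ONCE(), while deliberate stores must go through the RW variant, which writes through a non-const volatile view. A compilable model with the classic macro shapes (assumed definitions, simplified from the real headers; __typeof__ is a gcc/clang extension):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long jiffies_stall;

int main(void)
{
	ACCESS_ONCE_RW(jiffies_stall) = 42;           /* store: must use the RW form */
	printf("%lu\n", ACCESS_ONCE(jiffies_stall));  /* load: read-only form */
	/* ACCESS_ONCE(jiffies_stall) = 0;  -- would not compile: const lvalue */
	return 0;
}
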
91798diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91799index 7680fc2..b8e9161 100644
91800--- a/kernel/rcu/tree.c
91801+++ b/kernel/rcu/tree.c
91802@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91803 */
91804 rdtp = this_cpu_ptr(&rcu_dynticks);
91805 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91806- atomic_add(2, &rdtp->dynticks); /* QS. */
91807+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91808 smp_mb__after_atomic(); /* Later stuff after QS. */
91809 break;
91810 }
91811@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91812 rcu_prepare_for_idle();
91813 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91814 smp_mb__before_atomic(); /* See above. */
91815- atomic_inc(&rdtp->dynticks);
91816+ atomic_inc_unchecked(&rdtp->dynticks);
91817 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91818- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91819+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91820 rcu_dynticks_task_enter();
91821
91822 /*
91823@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91824
91825 rcu_dynticks_task_exit();
91826 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91827- atomic_inc(&rdtp->dynticks);
91828+ atomic_inc_unchecked(&rdtp->dynticks);
91829 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91830 smp_mb__after_atomic(); /* See above. */
91831- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91832+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91833 rcu_cleanup_after_idle();
91834 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91835 if (!user && !is_idle_task(current)) {
91836@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91837 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91838
91839 if (rdtp->dynticks_nmi_nesting == 0 &&
91840- (atomic_read(&rdtp->dynticks) & 0x1))
91841+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91842 return;
91843 rdtp->dynticks_nmi_nesting++;
91844 smp_mb__before_atomic(); /* Force delay from prior write. */
91845- atomic_inc(&rdtp->dynticks);
91846+ atomic_inc_unchecked(&rdtp->dynticks);
91847 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91848 smp_mb__after_atomic(); /* See above. */
91849- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91850+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91851 }
91852
91853 /**
91854@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91855 return;
91856 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91857 smp_mb__before_atomic(); /* See above. */
91858- atomic_inc(&rdtp->dynticks);
91859+ atomic_inc_unchecked(&rdtp->dynticks);
91860 smp_mb__after_atomic(); /* Force delay to next write. */
91861- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91862+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91863 }
91864
91865 /**
91866@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91867 */
91868 bool notrace __rcu_is_watching(void)
91869 {
91870- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91871+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91872 }
91873
91874 /**
91875@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91876 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91877 bool *isidle, unsigned long *maxj)
91878 {
91879- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91880+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91881 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91882 if ((rdp->dynticks_snap & 0x1) == 0) {
91883 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91884@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91885 int *rcrmp;
91886 unsigned int snap;
91887
91888- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91889+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91890 snap = (unsigned int)rdp->dynticks_snap;
91891
91892 /*
91893@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91894 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91895 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91896 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91897- ACCESS_ONCE(rdp->cond_resched_completed) =
91898+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91899 ACCESS_ONCE(rdp->mynode->completed);
91900 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91901- ACCESS_ONCE(*rcrmp) =
91902+ ACCESS_ONCE_RW(*rcrmp) =
91903 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91904 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91905 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91906@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91907 rsp->gp_start = j;
91908 smp_wmb(); /* Record start time before stall time. */
91909 j1 = rcu_jiffies_till_stall_check();
91910- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91911+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91912 rsp->jiffies_resched = j + j1 / 2;
91913 }
91914
91915@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91916 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91917 return;
91918 }
91919- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91920+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91921 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91922
91923 /*
91924@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
91925
91926 raw_spin_lock_irqsave(&rnp->lock, flags);
91927 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
91928- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
91929+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
91930 3 * rcu_jiffies_till_stall_check() + 3;
91931 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91932
91933@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
91934 struct rcu_state *rsp;
91935
91936 for_each_rcu_flavor(rsp)
91937- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91938+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91939 }
91940
91941 /*
91942@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
91943 raw_spin_unlock_irq(&rnp->lock);
91944 return 0;
91945 }
91946- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91947+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91948
91949 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
91950 /*
91951@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
91952 rdp = this_cpu_ptr(rsp->rda);
91953 rcu_preempt_check_blocked_tasks(rnp);
91954 rnp->qsmask = rnp->qsmaskinit;
91955- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
91956+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
91957 WARN_ON_ONCE(rnp->completed != rsp->completed);
91958- ACCESS_ONCE(rnp->completed) = rsp->completed;
91959+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
91960 if (rnp == rdp->mynode)
91961 (void)__note_gp_changes(rsp, rnp, rdp);
91962 rcu_preempt_boost_start_gp(rnp);
91963@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
91964 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
91965 raw_spin_lock_irq(&rnp->lock);
91966 smp_mb__after_unlock_lock();
91967- ACCESS_ONCE(rsp->gp_flags) =
91968+ ACCESS_ONCE_RW(rsp->gp_flags) =
91969 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
91970 raw_spin_unlock_irq(&rnp->lock);
91971 }
91972@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91973 rcu_for_each_node_breadth_first(rsp, rnp) {
91974 raw_spin_lock_irq(&rnp->lock);
91975 smp_mb__after_unlock_lock();
91976- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
91977+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
91978 rdp = this_cpu_ptr(rsp->rda);
91979 if (rnp == rdp->mynode)
91980 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
91981@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91982 rcu_nocb_gp_set(rnp, nocb);
91983
91984 /* Declare grace period done. */
91985- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
91986+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
91987 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
91988 rsp->fqs_state = RCU_GP_IDLE;
91989 rdp = this_cpu_ptr(rsp->rda);
91990 /* Advance CBs to reduce false positives below. */
91991 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
91992 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
91993- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91994+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91995 trace_rcu_grace_period(rsp->name,
91996 ACCESS_ONCE(rsp->gpnum),
91997 TPS("newreq"));
91998@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
91999 */
92000 return false;
92001 }
92002- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92003+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92004 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92005 TPS("newreq"));
92006
92007@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92008 rsp->qlen += rdp->qlen;
92009 rdp->n_cbs_orphaned += rdp->qlen;
92010 rdp->qlen_lazy = 0;
92011- ACCESS_ONCE(rdp->qlen) = 0;
92012+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92013 }
92014
92015 /*
92016@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92017 }
92018 smp_mb(); /* List handling before counting for rcu_barrier(). */
92019 rdp->qlen_lazy -= count_lazy;
92020- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92021+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92022 rdp->n_cbs_invoked += count;
92023
92024 /* Reinstate batch limit if we have worked down the excess. */
92025@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92026 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92027 return; /* Someone beat us to it. */
92028 }
92029- ACCESS_ONCE(rsp->gp_flags) =
92030+ ACCESS_ONCE_RW(rsp->gp_flags) =
92031 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
92032 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92033 rcu_gp_kthread_wake(rsp);
92034@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92035 /*
92036 * Do RCU core processing for the current CPU.
92037 */
92038-static void rcu_process_callbacks(struct softirq_action *unused)
92039+static void rcu_process_callbacks(void)
92040 {
92041 struct rcu_state *rsp;
92042
92043@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92044 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92045 if (debug_rcu_head_queue(head)) {
92046 /* Probable double call_rcu(), so leak the callback. */
92047- ACCESS_ONCE(head->func) = rcu_leak_callback;
92048+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92049 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92050 return;
92051 }
92052@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92053 local_irq_restore(flags);
92054 return;
92055 }
92056- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92057+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92058 if (lazy)
92059 rdp->qlen_lazy++;
92060 else
92061@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
92062 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92063 * course be required on a 64-bit system.
92064 */
92065- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92066+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92067 (ulong)atomic_long_read(&rsp->expedited_done) +
92068 ULONG_MAX / 8)) {
92069 synchronize_sched();
92070- atomic_long_inc(&rsp->expedited_wrap);
92071+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92072 return;
92073 }
92074
92075@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
92076 * Take a ticket. Note that atomic_inc_return() implies a
92077 * full memory barrier.
92078 */
92079- snap = atomic_long_inc_return(&rsp->expedited_start);
92080+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92081 firstsnap = snap;
92082 if (!try_get_online_cpus()) {
92083 /* CPU hotplug operation in flight, fall back to normal GP. */
92084 wait_rcu_gp(call_rcu_sched);
92085- atomic_long_inc(&rsp->expedited_normal);
92086+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92087 return;
92088 }
92089 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92090@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
92091 for_each_cpu(cpu, cm) {
92092 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92093
92094- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92095+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92096 cpumask_clear_cpu(cpu, cm);
92097 }
92098 if (cpumask_weight(cm) == 0)
92099@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92100 synchronize_sched_expedited_cpu_stop,
92101 NULL) == -EAGAIN) {
92102 put_online_cpus();
92103- atomic_long_inc(&rsp->expedited_tryfail);
92104+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92105
92106 /* Check to see if someone else did our work for us. */
92107 s = atomic_long_read(&rsp->expedited_done);
92108 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92109 /* ensure test happens before caller kfree */
92110 smp_mb__before_atomic(); /* ^^^ */
92111- atomic_long_inc(&rsp->expedited_workdone1);
92112+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92113 free_cpumask_var(cm);
92114 return;
92115 }
92116@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92117 udelay(trycount * num_online_cpus());
92118 } else {
92119 wait_rcu_gp(call_rcu_sched);
92120- atomic_long_inc(&rsp->expedited_normal);
92121+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92122 free_cpumask_var(cm);
92123 return;
92124 }
92125@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92126 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92127 /* ensure test happens before caller kfree */
92128 smp_mb__before_atomic(); /* ^^^ */
92129- atomic_long_inc(&rsp->expedited_workdone2);
92130+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92131 free_cpumask_var(cm);
92132 return;
92133 }
92134@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92135 if (!try_get_online_cpus()) {
92136 /* CPU hotplug operation in flight, use normal GP. */
92137 wait_rcu_gp(call_rcu_sched);
92138- atomic_long_inc(&rsp->expedited_normal);
92139+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92140 free_cpumask_var(cm);
92141 return;
92142 }
92143- snap = atomic_long_read(&rsp->expedited_start);
92144+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92145 smp_mb(); /* ensure read is before try_stop_cpus(). */
92146 }
92147- atomic_long_inc(&rsp->expedited_stoppedcpus);
92148+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92149
92150 all_cpus_idle:
92151 free_cpumask_var(cm);
92152@@ -3072,16 +3072,16 @@ all_cpus_idle:
92153 * than we did already did their update.
92154 */
92155 do {
92156- atomic_long_inc(&rsp->expedited_done_tries);
92157+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92158 s = atomic_long_read(&rsp->expedited_done);
92159 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92160 /* ensure test happens before caller kfree */
92161 smp_mb__before_atomic(); /* ^^^ */
92162- atomic_long_inc(&rsp->expedited_done_lost);
92163+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92164 break;
92165 }
92166 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92167- atomic_long_inc(&rsp->expedited_done_exit);
92168+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92169
92170 put_online_cpus();
92171 }
92172@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92173 * ACCESS_ONCE() to prevent the compiler from speculating
92174 * the increment to precede the early-exit check.
92175 */
92176- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92177+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92178 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92179 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92180 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92181@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92182
92183 /* Increment ->n_barrier_done to prevent duplicate work. */
92184 smp_mb(); /* Keep increment after above mechanism. */
92185- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92186+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92187 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92188 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92189 smp_mb(); /* Keep increment before caller's subsequent code. */
92190@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92191 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92192 init_callback_list(rdp);
92193 rdp->qlen_lazy = 0;
92194- ACCESS_ONCE(rdp->qlen) = 0;
92195+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92196 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92197 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92198- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92199+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92200 rdp->cpu = cpu;
92201 rdp->rsp = rsp;
92202 rcu_boot_init_nocb_percpu_data(rdp);
92203@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92204 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92205 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92206 rcu_sysidle_init_percpu_data(rdp->dynticks);
92207- atomic_set(&rdp->dynticks->dynticks,
92208- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92209+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92210+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92211 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92212
92213 /* Add CPU to rcu_node bitmasks. */
92214diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92215index 8e7b184..9c55768 100644
92216--- a/kernel/rcu/tree.h
92217+++ b/kernel/rcu/tree.h
92218@@ -87,11 +87,11 @@ struct rcu_dynticks {
92219 long long dynticks_nesting; /* Track irq/process nesting level. */
92220 /* Process level is worth LLONG_MAX/2. */
92221 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92222- atomic_t dynticks; /* Even value for idle, else odd. */
92223+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92224 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92225 long long dynticks_idle_nesting;
92226 /* irq/process nesting level from idle. */
92227- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92228+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92229 /* "Idle" excludes userspace execution. */
92230 unsigned long dynticks_idle_jiffies;
92231 /* End of last non-NMI non-idle period. */
92232@@ -466,17 +466,17 @@ struct rcu_state {
92233 /* _rcu_barrier(). */
92234 /* End of fields guarded by barrier_mutex. */
92235
92236- atomic_long_t expedited_start; /* Starting ticket. */
92237- atomic_long_t expedited_done; /* Done ticket. */
92238- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92239- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92240- atomic_long_t expedited_workdone1; /* # done by others #1. */
92241- atomic_long_t expedited_workdone2; /* # done by others #2. */
92242- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92243- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92244- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92245- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92246- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92247+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92248+ atomic_long_t expedited_done; /* Done ticket. */
92249+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92250+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92251+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92252+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92253+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92254+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92255+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92256+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92257+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92258
92259 unsigned long jiffies_force_qs; /* Time at which to invoke */
92260 /* force_quiescent_state(). */
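
Note how the tree.h hunk leaves expedited_done alone while its siblings become unchecked — presumably because that field is advanced with atomic_long_cmpxchg() as a ticket rather than blindly incremented, as the tree.c hunks above show. A C11 sketch of that publish-if-newer shape; the names are illustrative, not the kernel code:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_long done = 0;                /* the ticket that stays checked */
	long snap = 5;                       /* value we want to publish */
	long s = atomic_load(&done);

	/* publish snap only if nobody got there first */
	while (s < snap && !atomic_compare_exchange_weak(&done, &s, snap))
		;                            /* lost or spurious fail: s reloaded, retry */
	printf("expedited_done=%ld\n", atomic_load(&done));
	return 0;
}
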
92261diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92262index 3ec85cb..3687925 100644
92263--- a/kernel/rcu/tree_plugin.h
92264+++ b/kernel/rcu/tree_plugin.h
92265@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92266 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92267 {
92268 return !rcu_preempted_readers_exp(rnp) &&
92269- ACCESS_ONCE(rnp->expmask) == 0;
92270+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92271 }
92272
92273 /*
92274@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92275
92276 /* Clean up and exit. */
92277 smp_mb(); /* ensure expedited GP seen before counter increment. */
92278- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92279+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92280 sync_rcu_preempt_exp_count + 1;
92281 unlock_mb_ret:
92282 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92283@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92284 free_cpumask_var(cm);
92285 }
92286
92287-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92288+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92289 .store = &rcu_cpu_kthread_task,
92290 .thread_should_run = rcu_cpu_kthread_should_run,
92291 .thread_fn = rcu_cpu_kthread,
92292@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92293 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92294 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92295 cpu, ticks_value, ticks_title,
92296- atomic_read(&rdtp->dynticks) & 0xfff,
92297+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92298 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92299 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92300 fast_no_hz);
92301@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92302 return;
92303 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92304 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92305- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92306+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92307 wake_up(&rdp_leader->nocb_wq);
92308 }
92309 }
92310@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92311
92312 /* Enqueue the callback on the nocb list and update counts. */
92313 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92314- ACCESS_ONCE(*old_rhpp) = rhp;
92315+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92316 atomic_long_add(rhcount, &rdp->nocb_q_count);
92317 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92318 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92319@@ -2286,7 +2286,7 @@ wait_again:
92320 continue; /* No CBs here, try next follower. */
92321
92322 /* Move callbacks to wait-for-GP list, which is empty. */
92323- ACCESS_ONCE(rdp->nocb_head) = NULL;
92324+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92325 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92326 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92327 rdp->nocb_gp_count_lazy =
92328@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92329 list = ACCESS_ONCE(rdp->nocb_follower_head);
92330 BUG_ON(!list);
92331 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92332- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92333+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92334 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92335 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92336 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92337@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92338 list = next;
92339 }
92340 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92341- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92342- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92343+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92344+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92345 rdp->nocb_p_count_lazy - cl;
92346 rdp->n_nocbs_invoked += c;
92347 }
92348@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92349 if (!rcu_nocb_need_deferred_wakeup(rdp))
92350 return;
92351 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92352- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92353+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92354 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92355 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92356 }
92357@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92358 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92359 "rcuo%c/%d", rsp->abbr, cpu);
92360 BUG_ON(IS_ERR(t));
92361- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92362+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92363 }
92364
92365 /*
92366@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92367
92368 /* Record start of fully idle period. */
92369 j = jiffies;
92370- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92371+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92372 smp_mb__before_atomic();
92373- atomic_inc(&rdtp->dynticks_idle);
92374+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92375 smp_mb__after_atomic();
92376- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92377+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92378 }
92379
92380 /*
92381@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92382
92383 /* Record end of idle period. */
92384 smp_mb__before_atomic();
92385- atomic_inc(&rdtp->dynticks_idle);
92386+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92387 smp_mb__after_atomic();
92388- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92389+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92390
92391 /*
92392 * If we are the timekeeping CPU, we are permitted to be non-idle
92393@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92394 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92395
92396 /* Pick up current idle and NMI-nesting counter and check. */
92397- cur = atomic_read(&rdtp->dynticks_idle);
92398+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92399 if (cur & 0x1) {
92400 *isidle = false; /* We are not idle! */
92401 return;
92402@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92403 case RCU_SYSIDLE_NOT:
92404
92405 /* First time all are idle, so note a short idle period. */
92406- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92407+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92408 break;
92409
92410 case RCU_SYSIDLE_SHORT:
92411@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92412 {
92413 smp_mb();
92414 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92415- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92416+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92417 }
92418
92419 /*
92420@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92421 smp_mb(); /* grace period precedes setting inuse. */
92422
92423 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92424- ACCESS_ONCE(rshp->inuse) = 0;
92425+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92426 }
92427
92428 /*
92429@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92430 static void rcu_dynticks_task_enter(void)
92431 {
92432 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92433- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92434+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92435 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92436 }
92437
92438@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92439 static void rcu_dynticks_task_exit(void)
92440 {
92441 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92442- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92443+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92444 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92445 }
92446diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92447index 5cdc62e..cc52e88 100644
92448--- a/kernel/rcu/tree_trace.c
92449+++ b/kernel/rcu/tree_trace.c
92450@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92451 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92452 rdp->passed_quiesce, rdp->qs_pending);
92453 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92454- atomic_read(&rdp->dynticks->dynticks),
92455+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92456 rdp->dynticks->dynticks_nesting,
92457 rdp->dynticks->dynticks_nmi_nesting,
92458 rdp->dynticks_fqs);
92459@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92460 struct rcu_state *rsp = (struct rcu_state *)m->private;
92461
92462 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92463- atomic_long_read(&rsp->expedited_start),
92464+ atomic_long_read_unchecked(&rsp->expedited_start),
92465 atomic_long_read(&rsp->expedited_done),
92466- atomic_long_read(&rsp->expedited_wrap),
92467- atomic_long_read(&rsp->expedited_tryfail),
92468- atomic_long_read(&rsp->expedited_workdone1),
92469- atomic_long_read(&rsp->expedited_workdone2),
92470- atomic_long_read(&rsp->expedited_normal),
92471- atomic_long_read(&rsp->expedited_stoppedcpus),
92472- atomic_long_read(&rsp->expedited_done_tries),
92473- atomic_long_read(&rsp->expedited_done_lost),
92474- atomic_long_read(&rsp->expedited_done_exit));
92475+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92476+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92477+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92478+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92479+ atomic_long_read_unchecked(&rsp->expedited_normal),
92480+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92481+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92482+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92483+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92484 return 0;
92485 }
92486
92487diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92488index e0d31a3..f4dafe3 100644
92489--- a/kernel/rcu/update.c
92490+++ b/kernel/rcu/update.c
92491@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92492 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92493 */
92494 if (till_stall_check < 3) {
92495- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92496+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92497 till_stall_check = 3;
92498 } else if (till_stall_check > 300) {
92499- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92500+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92501 till_stall_check = 300;
92502 }
92503 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92504@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92505 !ACCESS_ONCE(t->on_rq) ||
92506 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92507 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92508- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92509+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92510 list_del_init(&t->rcu_tasks_holdout_list);
92511 put_task_struct(t);
92512 return;
92513@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92514 !is_idle_task(t)) {
92515 get_task_struct(t);
92516 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92517- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92518+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92519 list_add(&t->rcu_tasks_holdout_list,
92520 &rcu_tasks_holdouts);
92521 }
92522@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92523 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92524 BUG_ON(IS_ERR(t));
92525 smp_mb(); /* Ensure others see full kthread. */
92526- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92527+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92528 mutex_unlock(&rcu_tasks_kthread_mutex);
92529 }
92530
92531diff --git a/kernel/resource.c b/kernel/resource.c
92532index 0bcebff..e7cd5b2 100644
92533--- a/kernel/resource.c
92534+++ b/kernel/resource.c
92535@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92536
92537 static int __init ioresources_init(void)
92538 {
92539+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92540+#ifdef CONFIG_GRKERNSEC_PROC_USER
92541+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92542+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92543+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92544+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92545+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92546+#endif
92547+#else
92548 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92549 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92550+#endif
92551 return 0;
92552 }
92553 __initcall(ioresources_init);
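
A compile-time model of the resource.c mode selection: under GRKERNSEC_PROC_ADD the ioports/iomem entries drop from the world-readable default to root-only or root-plus-group. The CONFIG_* macro below is defined by hand purely for illustration; in a real build it comes from the kernel config:

#include <stdio.h>
#include <sys/stat.h>

#define CONFIG_GRKERNSEC_PROC_USERGROUP 1   /* hand-defined for illustration */

static mode_t proc_mode(void)
{
#if defined(CONFIG_GRKERNSEC_PROC_USER)
	return S_IRUSR;                     /* root only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;           /* root plus one trusted group */
#else
	return 0444;                        /* stock: mode 0 means the world-readable default */
#endif
}

int main(void)
{
	printf("iomem mode: %04o\n", (unsigned)proc_mode());
	return 0;
}
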
92554diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92555index eae160d..c9aa22e 100644
92556--- a/kernel/sched/auto_group.c
92557+++ b/kernel/sched/auto_group.c
92558@@ -11,7 +11,7 @@
92559
92560 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92561 static struct autogroup autogroup_default;
92562-static atomic_t autogroup_seq_nr;
92563+static atomic_unchecked_t autogroup_seq_nr;
92564
92565 void __init autogroup_init(struct task_struct *init_task)
92566 {
92567@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92568
92569 kref_init(&ag->kref);
92570 init_rwsem(&ag->lock);
92571- ag->id = atomic_inc_return(&autogroup_seq_nr);
92572+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92573 ag->tg = tg;
92574 #ifdef CONFIG_RT_GROUP_SCHED
92575 /*
92576diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92577index 607f852..486bc87 100644
92578--- a/kernel/sched/completion.c
92579+++ b/kernel/sched/completion.c
92580@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92581 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92582 * or number of jiffies left till timeout) if completed.
92583 */
92584-long __sched
92585+long __sched __intentional_overflow(-1)
92586 wait_for_completion_interruptible_timeout(struct completion *x,
92587 unsigned long timeout)
92588 {
92589@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92590 *
92591 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92592 */
92593-int __sched wait_for_completion_killable(struct completion *x)
92594+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92595 {
92596 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92597 if (t == -ERESTARTSYS)
92598@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92599 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92600 * or number of jiffies left till timeout) if completed.
92601 */
92602-long __sched
92603+long __sched __intentional_overflow(-1)
92604 wait_for_completion_killable_timeout(struct completion *x,
92605 unsigned long timeout)
92606 {
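
completion.c gains __intentional_overflow(-1) markers: an annotation for the size-overflow gcc plugin saying the signed return deliberately multiplexes a remaining-jiffies count with negative errno values, so the plugin should not flag it. A sketch with the attribute stubbed to nothing; -512 stands in for the kernel's -ERESTARTSYS:

#include <stdio.h>

#define __intentional_overflow(...)         /* gcc-plugin marker, no-op here */

static long __intentional_overflow(-1) wait_timeout(long timeout)
{
	int interrupted = 0;                /* pretend nothing woke us early */

	if (interrupted)
		return -512;                /* models -ERESTARTSYS */
	return timeout > 1 ? timeout - 1 : 1;  /* jiffies left, at least 1 */
}

int main(void)
{
	printf("left=%ld\n", wait_timeout(100));
	return 0;
}
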
92607diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92608index 44dfc8b..56d160d 100644
92609--- a/kernel/sched/core.c
92610+++ b/kernel/sched/core.c
92611@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
92612 int sysctl_numa_balancing(struct ctl_table *table, int write,
92613 void __user *buffer, size_t *lenp, loff_t *ppos)
92614 {
92615- struct ctl_table t;
92616+ ctl_table_no_const t;
92617 int err;
92618 int state = numabalancing_enabled;
92619
92620@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92621 next->active_mm = oldmm;
92622 atomic_inc(&oldmm->mm_count);
92623 enter_lazy_tlb(oldmm, next);
92624- } else
92625+ } else {
92626 switch_mm(oldmm, mm, next);
92627+ populate_stack();
92628+ }
92629
92630 if (!prev->mm) {
92631 prev->active_mm = NULL;
92632@@ -3152,6 +3154,8 @@ int can_nice(const struct task_struct *p, const int nice)
92633 /* convert nice value [19,-20] to rlimit style value [1,40] */
92634 int nice_rlim = nice_to_rlimit(nice);
92635
92636+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92637+
92638 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92639 capable(CAP_SYS_NICE));
92640 }
92641@@ -3178,7 +3182,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92642 nice = task_nice(current) + increment;
92643
92644 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92645- if (increment < 0 && !can_nice(current, nice))
92646+ if (increment < 0 && (!can_nice(current, nice) ||
92647+ gr_handle_chroot_nice()))
92648 return -EPERM;
92649
92650 retval = security_task_setnice(current, nice);
92651@@ -3473,6 +3478,7 @@ recheck:
92652 if (policy != p->policy && !rlim_rtprio)
92653 return -EPERM;
92654
92655+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92656 /* can't increase priority */
92657 if (attr->sched_priority > p->rt_priority &&
92658 attr->sched_priority > rlim_rtprio)
92659@@ -4973,6 +4979,7 @@ void idle_task_exit(void)
92660
92661 if (mm != &init_mm) {
92662 switch_mm(mm, &init_mm, current);
92663+ populate_stack();
92664 finish_arch_post_lock_switch();
92665 }
92666 mmdrop(mm);
92667@@ -5068,7 +5075,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92668
92669 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92670
92671-static struct ctl_table sd_ctl_dir[] = {
92672+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92673 {
92674 .procname = "sched_domain",
92675 .mode = 0555,
92676@@ -5085,17 +5092,17 @@ static struct ctl_table sd_ctl_root[] = {
92677 {}
92678 };
92679
92680-static struct ctl_table *sd_alloc_ctl_entry(int n)
92681+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92682 {
92683- struct ctl_table *entry =
92684+ ctl_table_no_const *entry =
92685 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92686
92687 return entry;
92688 }
92689
92690-static void sd_free_ctl_entry(struct ctl_table **tablep)
92691+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92692 {
92693- struct ctl_table *entry;
92694+ ctl_table_no_const *entry;
92695
92696 /*
92697 * In the intermediate directories, both the child directory and
92698@@ -5103,22 +5110,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92699 * will always be set. In the lowest directory the names are
92700 * static strings and all have proc handlers.
92701 */
92702- for (entry = *tablep; entry->mode; entry++) {
92703- if (entry->child)
92704- sd_free_ctl_entry(&entry->child);
92705+ for (entry = tablep; entry->mode; entry++) {
92706+ if (entry->child) {
92707+ sd_free_ctl_entry(entry->child);
92708+ pax_open_kernel();
92709+ entry->child = NULL;
92710+ pax_close_kernel();
92711+ }
92712 if (entry->proc_handler == NULL)
92713 kfree(entry->procname);
92714 }
92715
92716- kfree(*tablep);
92717- *tablep = NULL;
92718+ kfree(tablep);
92719 }
92720
92721 static int min_load_idx = 0;
92722 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92723
92724 static void
92725-set_table_entry(struct ctl_table *entry,
92726+set_table_entry(ctl_table_no_const *entry,
92727 const char *procname, void *data, int maxlen,
92728 umode_t mode, proc_handler *proc_handler,
92729 bool load_idx)
92730@@ -5138,7 +5148,7 @@ set_table_entry(struct ctl_table *entry,
92731 static struct ctl_table *
92732 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92733 {
92734- struct ctl_table *table = sd_alloc_ctl_entry(14);
92735+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92736
92737 if (table == NULL)
92738 return NULL;
92739@@ -5176,9 +5186,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92740 return table;
92741 }
92742
92743-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92744+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92745 {
92746- struct ctl_table *entry, *table;
92747+ ctl_table_no_const *entry, *table;
92748 struct sched_domain *sd;
92749 int domain_num = 0, i;
92750 char buf[32];
92751@@ -5205,11 +5215,13 @@ static struct ctl_table_header *sd_sysctl_header;
92752 static void register_sched_domain_sysctl(void)
92753 {
92754 int i, cpu_num = num_possible_cpus();
92755- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92756+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92757 char buf[32];
92758
92759 WARN_ON(sd_ctl_dir[0].child);
92760+ pax_open_kernel();
92761 sd_ctl_dir[0].child = entry;
92762+ pax_close_kernel();
92763
92764 if (entry == NULL)
92765 return;
92766@@ -5232,8 +5244,12 @@ static void unregister_sched_domain_sysctl(void)
92767 if (sd_sysctl_header)
92768 unregister_sysctl_table(sd_sysctl_header);
92769 sd_sysctl_header = NULL;
92770- if (sd_ctl_dir[0].child)
92771- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92772+ if (sd_ctl_dir[0].child) {
92773+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92774+ pax_open_kernel();
92775+ sd_ctl_dir[0].child = NULL;
92776+ pax_close_kernel();
92777+ }
92778 }
92779 #else
92780 static void register_sched_domain_sysctl(void)
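
core.c brackets every write to the now read-only sd_ctl_dir with pax_open_kernel()/pax_close_kernel(). A toy model of that discipline — here the write window is just a flag plus an assert, where the real mechanism briefly makes the page writable:

#include <assert.h>
#include <stdio.h>

static int kernel_open;                     /* models the temporary write window */

static void pax_open_kernel(void)  { kernel_open = 1; }
static void pax_close_kernel(void) { kernel_open = 0; }

struct ctl_dir { const char *name; struct ctl_dir *child; };

static struct ctl_dir sd_ctl_dir = { "sched_domain", NULL };  /* conceptually __read_only */

static void set_child(struct ctl_dir *dir, struct ctl_dir *child)
{
	assert(kernel_open && "write to __read_only data outside the window");
	dir->child = child;
}

int main(void)
{
	static struct ctl_dir entry = { "cpu0", NULL };

	pax_open_kernel();
	set_child(&sd_ctl_dir, &entry);     /* legal only inside the window */
	pax_close_kernel();
	printf("child=%s\n", sd_ctl_dir.child->name);
	return 0;
}
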
92781diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92782index fe331fc..29d620e 100644
92783--- a/kernel/sched/fair.c
92784+++ b/kernel/sched/fair.c
92785@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92786
92787 static void reset_ptenuma_scan(struct task_struct *p)
92788 {
92789- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92790+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92791 p->mm->numa_scan_offset = 0;
92792 }
92793
92794@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92795 * run_rebalance_domains is triggered when needed from the scheduler tick.
92796 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92797 */
92798-static void run_rebalance_domains(struct softirq_action *h)
92799+static __latent_entropy void run_rebalance_domains(void)
92800 {
92801 struct rq *this_rq = this_rq();
92802 enum cpu_idle_type idle = this_rq->idle_balance ?
92803diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92804index 9a2a45c..bb91ace 100644
92805--- a/kernel/sched/sched.h
92806+++ b/kernel/sched/sched.h
92807@@ -1182,7 +1182,7 @@ struct sched_class {
92808 #ifdef CONFIG_FAIR_GROUP_SCHED
92809 void (*task_move_group) (struct task_struct *p, int on_rq);
92810 #endif
92811-};
92812+} __do_const;
92813
92814 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92815 {
92816diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92817index 4ef9687..4f44028 100644
92818--- a/kernel/seccomp.c
92819+++ b/kernel/seccomp.c
92820@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92821
92822 switch (action) {
92823 case SECCOMP_RET_ERRNO:
92824- /* Set the low-order 16-bits as a errno. */
92825+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92826+ if (data > MAX_ERRNO)
92827+ data = MAX_ERRNO;
92828 syscall_set_return_value(current, task_pt_regs(current),
92829 -data, 0);
92830 goto skip;
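Why the clamp above matters: the kernel interprets only syscall return values in [-MAX_ERRNO, -1] (MAX_ERRNO is 4095) as error codes, while SECCOMP_RET_DATA is a full 16 bits, so an uncapped value could be negated into something that no longer reads as an errno. The capped path condensed into a sketch, using the names from the hunk:

u32 data = ret & SECCOMP_RET_DATA;	/* low 16 bits of the filter verdict */

if (data > MAX_ERRNO)			/* cap at the largest valid errno */
	data = MAX_ERRNO;
syscall_set_return_value(current, task_pt_regs(current), -data, 0);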
92831diff --git a/kernel/signal.c b/kernel/signal.c
92832index 16a30529..25ad033 100644
92833--- a/kernel/signal.c
92834+++ b/kernel/signal.c
92835@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92836
92837 int print_fatal_signals __read_mostly;
92838
92839-static void __user *sig_handler(struct task_struct *t, int sig)
92840+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92841 {
92842 return t->sighand->action[sig - 1].sa.sa_handler;
92843 }
92844
92845-static int sig_handler_ignored(void __user *handler, int sig)
92846+static int sig_handler_ignored(__sighandler_t handler, int sig)
92847 {
92848 /* Is it explicitly or implicitly ignored? */
92849 return handler == SIG_IGN ||
92850@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92851
92852 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92853 {
92854- void __user *handler;
92855+ __sighandler_t handler;
92856
92857 handler = sig_handler(t, sig);
92858
92859@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92860 atomic_inc(&user->sigpending);
92861 rcu_read_unlock();
92862
92863+ if (!override_rlimit)
92864+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92865+
92866 if (override_rlimit ||
92867 atomic_read(&user->sigpending) <=
92868 task_rlimit(t, RLIMIT_SIGPENDING)) {
92869@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92870
92871 int unhandled_signal(struct task_struct *tsk, int sig)
92872 {
92873- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92874+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92875 if (is_global_init(tsk))
92876 return 1;
92877 if (handler != SIG_IGN && handler != SIG_DFL)
92878@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92879 }
92880 }
92881
92882+ /* allow glibc communication via tgkill to other threads in our
92883+ thread group */
92884+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92885+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92886+ && gr_handle_signal(t, sig))
92887+ return -EPERM;
92888+
92889 return security_task_kill(t, info, sig, 0);
92890 }
92891
92892@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92893 return send_signal(sig, info, p, 1);
92894 }
92895
92896-static int
92897+int
92898 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92899 {
92900 return send_signal(sig, info, t, 0);
92901@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92902 unsigned long int flags;
92903 int ret, blocked, ignored;
92904 struct k_sigaction *action;
92905+ int is_unhandled = 0;
92906
92907 spin_lock_irqsave(&t->sighand->siglock, flags);
92908 action = &t->sighand->action[sig-1];
92909@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92910 }
92911 if (action->sa.sa_handler == SIG_DFL)
92912 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92913+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92914+ is_unhandled = 1;
92915 ret = specific_send_sig_info(sig, info, t);
92916 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92917
92918+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
92919+	   normal operation */
92920+ if (is_unhandled) {
92921+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92922+ gr_handle_crash(t, sig);
92923+ }
92924+
92925 return ret;
92926 }
92927
92928@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92929 ret = check_kill_permission(sig, info, p);
92930 rcu_read_unlock();
92931
92932- if (!ret && sig)
92933+ if (!ret && sig) {
92934 ret = do_send_sig_info(sig, info, p, true);
92935+ if (!ret)
92936+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92937+ }
92938
92939 return ret;
92940 }
92941@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92942 int error = -ESRCH;
92943
92944 rcu_read_lock();
92945- p = find_task_by_vpid(pid);
92946+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92947+ /* allow glibc communication via tgkill to other threads in our
92948+ thread group */
92949+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92950+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92951+ p = find_task_by_vpid_unrestricted(pid);
92952+ else
92953+#endif
92954+ p = find_task_by_vpid(pid);
92955 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
92956 error = check_kill_permission(sig, info, p);
92957 /*
92958@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
92959 }
92960 seg = get_fs();
92961 set_fs(KERNEL_DS);
92962- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
92963- (stack_t __force __user *) &uoss,
92964+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
92965+ (stack_t __force_user *) &uoss,
92966 compat_user_stack_pointer());
92967 set_fs(seg);
92968 if (ret >= 0 && uoss_ptr) {
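The sig_handler()/sig_handler_ignored() retyping above trades the generic void __user * for __sighandler_t, the function-pointer type that sa_handler actually carries, so the SIG_IGN/SIG_DFL comparisons type-check without casts. A hedged one-function sketch of the same idea; handler_is_ignored is an illustrative name:

static bool handler_is_ignored(const struct k_sigaction *ka, int sig)
{
	__sighandler_t h = ka->sa.sa_handler;	/* not a bare void __user * */

	return h == SIG_IGN ||
	       (h == SIG_DFL && sig_kernel_ignore(sig));
}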
92969diff --git a/kernel/smpboot.c b/kernel/smpboot.c
92970index 40190f2..8861d40 100644
92971--- a/kernel/smpboot.c
92972+++ b/kernel/smpboot.c
92973@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
92974 }
92975 smpboot_unpark_thread(plug_thread, cpu);
92976 }
92977- list_add(&plug_thread->list, &hotplug_threads);
92978+ pax_list_add(&plug_thread->list, &hotplug_threads);
92979 out:
92980 mutex_unlock(&smpboot_threads_lock);
92981 put_online_cpus();
92982@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
92983 {
92984 get_online_cpus();
92985 mutex_lock(&smpboot_threads_lock);
92986- list_del(&plug_thread->list);
92987+ pax_list_del(&plug_thread->list);
92988 smpboot_destroy_threads(plug_thread);
92989 mutex_unlock(&smpboot_threads_lock);
92990 put_online_cpus();
92991diff --git a/kernel/softirq.c b/kernel/softirq.c
92992index c497fcd..e8f90a9 100644
92993--- a/kernel/softirq.c
92994+++ b/kernel/softirq.c
92995@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
92996 EXPORT_SYMBOL(irq_stat);
92997 #endif
92998
92999-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93000+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93001
93002 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93003
93004@@ -266,7 +266,7 @@ restart:
93005 kstat_incr_softirqs_this_cpu(vec_nr);
93006
93007 trace_softirq_entry(vec_nr);
93008- h->action(h);
93009+ h->action();
93010 trace_softirq_exit(vec_nr);
93011 if (unlikely(prev_count != preempt_count())) {
93012 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93013@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93014 or_softirq_pending(1UL << nr);
93015 }
93016
93017-void open_softirq(int nr, void (*action)(struct softirq_action *))
93018+void __init open_softirq(int nr, void (*action)(void))
93019 {
93020 softirq_vec[nr].action = action;
93021 }
93022@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93023 }
93024 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93025
93026-static void tasklet_action(struct softirq_action *a)
93027+static void tasklet_action(void)
93028 {
93029 struct tasklet_struct *list;
93030
93031@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93032 }
93033 }
93034
93035-static void tasklet_hi_action(struct softirq_action *a)
93036+static __latent_entropy void tasklet_hi_action(void)
93037 {
93038 struct tasklet_struct *list;
93039
93040@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
93041 .notifier_call = cpu_callback
93042 };
93043
93044-static struct smp_hotplug_thread softirq_threads = {
93045+static struct smp_hotplug_thread softirq_threads __read_only = {
93046 .store = &ksoftirqd,
93047 .thread_should_run = ksoftirqd_should_run,
93048 .thread_fn = run_ksoftirqd,
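With softirq_vec moved into page-aligned read-only storage, open_softirq() is demoted to __init (registration is only possible before the section is sealed) and handlers drop their never-used struct softirq_action * argument. A sketch of a handler under the modified signature; MY_SOFTIRQ and my_softirq_handler are hypothetical names:

static __latent_entropy void my_softirq_handler(void)
{
	/* bottom-half work; no softirq_action argument any more */
}

static int __init my_subsys_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_handler);	/* must run at init time */
	return 0;
}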
93049diff --git a/kernel/sys.c b/kernel/sys.c
93050index ea9c881..2194af5 100644
93051--- a/kernel/sys.c
93052+++ b/kernel/sys.c
93053@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93054 error = -EACCES;
93055 goto out;
93056 }
93057+
93058+ if (gr_handle_chroot_setpriority(p, niceval)) {
93059+ error = -EACCES;
93060+ goto out;
93061+ }
93062+
93063 no_nice = security_task_setnice(p, niceval);
93064 if (no_nice) {
93065 error = no_nice;
93066@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93067 goto error;
93068 }
93069
93070+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93071+ goto error;
93072+
93073+ if (!gid_eq(new->gid, old->gid)) {
93074+		/* make sure we generate a learn log for what will
93075+		   end up being a role transition after a full-learning
93076+		   policy is generated.
93077+		   CAP_SETGID is required to perform a transition, and
93078+		   we may not have logged a CAP_SETGID check above, e.g.
93079+		   in the case where new rgid = old egid.
93080+		*/
93081+ gr_learn_cap(current, new, CAP_SETGID);
93082+ }
93083+
93084 if (rgid != (gid_t) -1 ||
93085 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93086 new->sgid = new->egid;
93087@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93088 old = current_cred();
93089
93090 retval = -EPERM;
93091+
93092+ if (gr_check_group_change(kgid, kgid, kgid))
93093+ goto error;
93094+
93095 if (ns_capable(old->user_ns, CAP_SETGID))
93096 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93097 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93098@@ -411,7 +435,7 @@ error:
93099 /*
93100 * change the user struct in a credentials set to match the new UID
93101 */
93102-static int set_user(struct cred *new)
93103+int set_user(struct cred *new)
93104 {
93105 struct user_struct *new_user;
93106
93107@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93108 goto error;
93109 }
93110
93111+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93112+ goto error;
93113+
93114 if (!uid_eq(new->uid, old->uid)) {
93115+		/* make sure we generate a learn log for what will
93116+		   end up being a role transition after a full-learning
93117+		   policy is generated.
93118+		   CAP_SETUID is required to perform a transition, and
93119+		   we may not have logged a CAP_SETUID check above, e.g.
93120+		   in the case where new ruid = old euid.
93121+		*/
93122+ gr_learn_cap(current, new, CAP_SETUID);
93123 retval = set_user(new);
93124 if (retval < 0)
93125 goto error;
93126@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93127 old = current_cred();
93128
93129 retval = -EPERM;
93130+
93131+ if (gr_check_crash_uid(kuid))
93132+ goto error;
93133+ if (gr_check_user_change(kuid, kuid, kuid))
93134+ goto error;
93135+
93136 if (ns_capable(old->user_ns, CAP_SETUID)) {
93137 new->suid = new->uid = kuid;
93138 if (!uid_eq(kuid, old->uid)) {
93139@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93140 goto error;
93141 }
93142
93143+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93144+ goto error;
93145+
93146 if (ruid != (uid_t) -1) {
93147 new->uid = kruid;
93148 if (!uid_eq(kruid, old->uid)) {
93149@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93150 goto error;
93151 }
93152
93153+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93154+ goto error;
93155+
93156 if (rgid != (gid_t) -1)
93157 new->gid = krgid;
93158 if (egid != (gid_t) -1)
93159@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93160 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93161 ns_capable(old->user_ns, CAP_SETUID)) {
93162 if (!uid_eq(kuid, old->fsuid)) {
93163+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93164+ goto error;
93165+
93166 new->fsuid = kuid;
93167 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93168 goto change_okay;
93169 }
93170 }
93171
93172+error:
93173 abort_creds(new);
93174 return old_fsuid;
93175
93176@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93177 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93178 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93179 ns_capable(old->user_ns, CAP_SETGID)) {
93180+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93181+ goto error;
93182+
93183 if (!gid_eq(kgid, old->fsgid)) {
93184 new->fsgid = kgid;
93185 goto change_okay;
93186 }
93187 }
93188
93189+error:
93190 abort_creds(new);
93191 return old_fsgid;
93192
93193@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93194 return -EFAULT;
93195
93196 down_read(&uts_sem);
93197- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93198+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93199 __OLD_UTS_LEN);
93200 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93201- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93202+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93203 __OLD_UTS_LEN);
93204 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93205- error |= __copy_to_user(&name->release, &utsname()->release,
93206+ error |= __copy_to_user(name->release, &utsname()->release,
93207 __OLD_UTS_LEN);
93208 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93209- error |= __copy_to_user(&name->version, &utsname()->version,
93210+ error |= __copy_to_user(name->version, &utsname()->version,
93211 __OLD_UTS_LEN);
93212 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93213- error |= __copy_to_user(&name->machine, &utsname()->machine,
93214+ error |= __copy_to_user(name->machine, &utsname()->machine,
93215 __OLD_UTS_LEN);
93216 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93217 up_read(&uts_sem);
93218@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93219 */
93220 new_rlim->rlim_cur = 1;
93221 }
93222+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93223+ is changed to a lower value. Since tasks can be created by the same
93224+ user in between this limit change and an execve by this task, force
93225+	   a recheck only for this task by setting PF_NPROC_EXCEEDED.
93226+ */
93227+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93228+ tsk->flags |= PF_NPROC_EXCEEDED;
93229 }
93230 if (!retval) {
93231 if (old_rlim)
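The PF_NPROC_EXCEEDED flag set above is consumed on the execve path: the flagged task is re-tested against RLIMIT_NPROC once, and the flag is cleared if the task is back under the limit. A sketch modeled on the upstream check in fs/exec.c:

if ((current->flags & PF_NPROC_EXCEEDED) &&
    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
	retval = -EAGAIN;		/* still over the lowered limit */
	goto out_ret;
}

/* below the limit (still or again); don't fail further execve() calls */
current->flags &= ~PF_NPROC_EXCEEDED;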
93232diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93233index 88ea2d6..88acc77 100644
93234--- a/kernel/sysctl.c
93235+++ b/kernel/sysctl.c
93236@@ -94,7 +94,6 @@
93237
93238
93239 #if defined(CONFIG_SYSCTL)
93240-
93241 /* External variables not in a header file. */
93242 extern int max_threads;
93243 extern int suid_dumpable;
93244@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93245
93246 /* Constants used for minimum and maximum */
93247 #ifdef CONFIG_LOCKUP_DETECTOR
93248-static int sixty = 60;
93249+static int sixty __read_only = 60;
93250 #endif
93251
93252-static int __maybe_unused neg_one = -1;
93253+static int __maybe_unused neg_one __read_only = -1;
93254
93255-static int zero;
93256-static int __maybe_unused one = 1;
93257-static int __maybe_unused two = 2;
93258-static int __maybe_unused four = 4;
93259-static unsigned long one_ul = 1;
93260-static int one_hundred = 100;
93261+static int zero __read_only = 0;
93262+static int __maybe_unused one __read_only = 1;
93263+static int __maybe_unused two __read_only = 2;
93264+static int __maybe_unused three __read_only = 3;
93265+static int __maybe_unused four __read_only = 4;
93266+static unsigned long one_ul __read_only = 1;
93267+static int one_hundred __read_only = 100;
93268 #ifdef CONFIG_PRINTK
93269-static int ten_thousand = 10000;
93270+static int ten_thousand __read_only = 10000;
93271 #endif
93272
93273 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93274@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93275 void __user *buffer, size_t *lenp, loff_t *ppos);
93276 #endif
93277
93278-#ifdef CONFIG_PRINTK
93279 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93280 void __user *buffer, size_t *lenp, loff_t *ppos);
93281-#endif
93282
93283 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93284 void __user *buffer, size_t *lenp, loff_t *ppos);
93285@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93286
93287 #endif
93288
93289+extern struct ctl_table grsecurity_table[];
93290+
93291 static struct ctl_table kern_table[];
93292 static struct ctl_table vm_table[];
93293 static struct ctl_table fs_table[];
93294@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93295 int sysctl_legacy_va_layout;
93296 #endif
93297
93298+#ifdef CONFIG_PAX_SOFTMODE
93299+static struct ctl_table pax_table[] = {
93300+ {
93301+ .procname = "softmode",
93302+ .data = &pax_softmode,
93303+ .maxlen = sizeof(unsigned int),
93304+ .mode = 0600,
93305+ .proc_handler = &proc_dointvec,
93306+ },
93307+
93308+ { }
93309+};
93310+#endif
93311+
93312 /* The default sysctl tables: */
93313
93314 static struct ctl_table sysctl_base_table[] = {
93315@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93316 #endif
93317
93318 static struct ctl_table kern_table[] = {
93319+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93320+ {
93321+ .procname = "grsecurity",
93322+ .mode = 0500,
93323+ .child = grsecurity_table,
93324+ },
93325+#endif
93326+
93327+#ifdef CONFIG_PAX_SOFTMODE
93328+ {
93329+ .procname = "pax",
93330+ .mode = 0500,
93331+ .child = pax_table,
93332+ },
93333+#endif
93334+
93335 {
93336 .procname = "sched_child_runs_first",
93337 .data = &sysctl_sched_child_runs_first,
93338@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93339 .data = &modprobe_path,
93340 .maxlen = KMOD_PATH_LEN,
93341 .mode = 0644,
93342- .proc_handler = proc_dostring,
93343+ .proc_handler = proc_dostring_modpriv,
93344 },
93345 {
93346 .procname = "modules_disabled",
93347@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93348 .extra1 = &zero,
93349 .extra2 = &one,
93350 },
93351+#endif
93352 {
93353 .procname = "kptr_restrict",
93354 .data = &kptr_restrict,
93355 .maxlen = sizeof(int),
93356 .mode = 0644,
93357 .proc_handler = proc_dointvec_minmax_sysadmin,
93358+#ifdef CONFIG_GRKERNSEC_HIDESYM
93359+ .extra1 = &two,
93360+#else
93361 .extra1 = &zero,
93362+#endif
93363 .extra2 = &two,
93364 },
93365-#endif
93366 {
93367 .procname = "ngroups_max",
93368 .data = &ngroups_max,
93369@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93370 */
93371 {
93372 .procname = "perf_event_paranoid",
93373- .data = &sysctl_perf_event_paranoid,
93374- .maxlen = sizeof(sysctl_perf_event_paranoid),
93375+ .data = &sysctl_perf_event_legitimately_concerned,
93376+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93377 .mode = 0644,
93378- .proc_handler = proc_dointvec,
93379+ /* go ahead, be a hero */
93380+ .proc_handler = proc_dointvec_minmax_sysadmin,
93381+ .extra1 = &neg_one,
93382+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93383+ .extra2 = &three,
93384+#else
93385+ .extra2 = &two,
93386+#endif
93387 },
93388 {
93389 .procname = "perf_event_mlock_kb",
93390@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
93391 .proc_handler = proc_dointvec_minmax,
93392 .extra1 = &zero,
93393 },
93394+ {
93395+ .procname = "heap_stack_gap",
93396+ .data = &sysctl_heap_stack_gap,
93397+ .maxlen = sizeof(sysctl_heap_stack_gap),
93398+ .mode = 0644,
93399+ .proc_handler = proc_doulongvec_minmax,
93400+ },
93401 #else
93402 {
93403 .procname = "nr_trim_pages",
93404@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
93405 (char __user *)buffer, lenp, ppos);
93406 }
93407
93408+int proc_dostring_modpriv(struct ctl_table *table, int write,
93409+ void __user *buffer, size_t *lenp, loff_t *ppos)
93410+{
93411+ if (write && !capable(CAP_SYS_MODULE))
93412+ return -EPERM;
93413+
93414+ return _proc_do_string(table->data, table->maxlen, write,
93415+ buffer, lenp, ppos);
93416+}
93417+
93418 static size_t proc_skip_spaces(char **buf)
93419 {
93420 size_t ret;
93421@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93422 len = strlen(tmp);
93423 if (len > *size)
93424 len = *size;
93425+ if (len > sizeof(tmp))
93426+ len = sizeof(tmp);
93427 if (copy_to_user(*buf, tmp, len))
93428 return -EFAULT;
93429 *size -= len;
93430@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93431 static int proc_taint(struct ctl_table *table, int write,
93432 void __user *buffer, size_t *lenp, loff_t *ppos)
93433 {
93434- struct ctl_table t;
93435+ ctl_table_no_const t;
93436 unsigned long tmptaint = get_taint();
93437 int err;
93438
93439@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
93440 return err;
93441 }
93442
93443-#ifdef CONFIG_PRINTK
93444 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93445 void __user *buffer, size_t *lenp, loff_t *ppos)
93446 {
93447@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93448
93449 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93450 }
93451-#endif
93452
93453 struct do_proc_dointvec_minmax_conv_param {
93454 int *min;
93455@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
93456 return -ENOSYS;
93457 }
93458
93459+int proc_dostring_modpriv(struct ctl_table *table, int write,
93460+ void __user *buffer, size_t *lenp, loff_t *ppos)
93461+{
93462+ return -ENOSYS;
93463+}
93464+
93465 int proc_dointvec(struct ctl_table *table, int write,
93466 void __user *buffer, size_t *lenp, loff_t *ppos)
93467 {
93468@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93469 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93470 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93471 EXPORT_SYMBOL(proc_dostring);
93472+EXPORT_SYMBOL(proc_dostring_modpriv);
93473 EXPORT_SYMBOL(proc_doulongvec_minmax);
93474 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
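proc_dostring_modpriv above is the whole pattern: reads stay unrestricted, writes are refused unless the caller holds the capability the knob effectively grants (here CAP_SYS_MODULE, since controlling modprobe_path controls module loading). The same gate generalized to an integer knob, as a hedged sketch; proc_dointvec_cap is not from the patch:

static int proc_dointvec_cap(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (write && !capable(CAP_SYS_ADMIN))	/* writes require privilege */
		return -EPERM;

	return proc_dointvec(table, write, buffer, lenp, ppos);
}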
93475diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93476index 670fff8..a247812 100644
93477--- a/kernel/taskstats.c
93478+++ b/kernel/taskstats.c
93479@@ -28,9 +28,12 @@
93480 #include <linux/fs.h>
93481 #include <linux/file.h>
93482 #include <linux/pid_namespace.h>
93483+#include <linux/grsecurity.h>
93484 #include <net/genetlink.h>
93485 #include <linux/atomic.h>
93486
93487+extern int gr_is_taskstats_denied(int pid);
93488+
93489 /*
93490 * Maximum length of a cpumask that can be specified in
93491 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93492@@ -576,6 +579,9 @@ err:
93493
93494 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93495 {
93496+ if (gr_is_taskstats_denied(current->pid))
93497+ return -EACCES;
93498+
93499 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93500 return cmd_attr_register_cpumask(info);
93501 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93502diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93503index a7077d3..dd48a49 100644
93504--- a/kernel/time/alarmtimer.c
93505+++ b/kernel/time/alarmtimer.c
93506@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93507 struct platform_device *pdev;
93508 int error = 0;
93509 int i;
93510- struct k_clock alarm_clock = {
93511+ static struct k_clock alarm_clock = {
93512 .clock_getres = alarm_clock_getres,
93513 .clock_get = alarm_clock_get,
93514 .timer_create = alarm_timer_create,
93515diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93516index d8c724c..6b331a4 100644
93517--- a/kernel/time/hrtimer.c
93518+++ b/kernel/time/hrtimer.c
93519@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93520 local_irq_restore(flags);
93521 }
93522
93523-static void run_hrtimer_softirq(struct softirq_action *h)
93524+static __latent_entropy void run_hrtimer_softirq(void)
93525 {
93526 hrtimer_peek_ahead_timers();
93527 }
93528diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93529index a16b678..8c5bd9d 100644
93530--- a/kernel/time/posix-cpu-timers.c
93531+++ b/kernel/time/posix-cpu-timers.c
93532@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93533
93534 static __init int init_posix_cpu_timers(void)
93535 {
93536- struct k_clock process = {
93537+ static struct k_clock process = {
93538 .clock_getres = process_cpu_clock_getres,
93539 .clock_get = process_cpu_clock_get,
93540 .timer_create = process_cpu_timer_create,
93541 .nsleep = process_cpu_nsleep,
93542 .nsleep_restart = process_cpu_nsleep_restart,
93543 };
93544- struct k_clock thread = {
93545+ static struct k_clock thread = {
93546 .clock_getres = thread_cpu_clock_getres,
93547 .clock_get = thread_cpu_clock_get,
93548 .timer_create = thread_cpu_timer_create,
93549diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93550index 31ea01f..7fc61ef 100644
93551--- a/kernel/time/posix-timers.c
93552+++ b/kernel/time/posix-timers.c
93553@@ -43,6 +43,7 @@
93554 #include <linux/hash.h>
93555 #include <linux/posix-clock.h>
93556 #include <linux/posix-timers.h>
93557+#include <linux/grsecurity.h>
93558 #include <linux/syscalls.h>
93559 #include <linux/wait.h>
93560 #include <linux/workqueue.h>
93561@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93562 * which we beg off on and pass to do_sys_settimeofday().
93563 */
93564
93565-static struct k_clock posix_clocks[MAX_CLOCKS];
93566+static struct k_clock *posix_clocks[MAX_CLOCKS];
93567
93568 /*
93569 * These ones are defined below.
93570@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93571 */
93572 static __init int init_posix_timers(void)
93573 {
93574- struct k_clock clock_realtime = {
93575+ static struct k_clock clock_realtime = {
93576 .clock_getres = hrtimer_get_res,
93577 .clock_get = posix_clock_realtime_get,
93578 .clock_set = posix_clock_realtime_set,
93579@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93580 .timer_get = common_timer_get,
93581 .timer_del = common_timer_del,
93582 };
93583- struct k_clock clock_monotonic = {
93584+ static struct k_clock clock_monotonic = {
93585 .clock_getres = hrtimer_get_res,
93586 .clock_get = posix_ktime_get_ts,
93587 .nsleep = common_nsleep,
93588@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93589 .timer_get = common_timer_get,
93590 .timer_del = common_timer_del,
93591 };
93592- struct k_clock clock_monotonic_raw = {
93593+ static struct k_clock clock_monotonic_raw = {
93594 .clock_getres = hrtimer_get_res,
93595 .clock_get = posix_get_monotonic_raw,
93596 };
93597- struct k_clock clock_realtime_coarse = {
93598+ static struct k_clock clock_realtime_coarse = {
93599 .clock_getres = posix_get_coarse_res,
93600 .clock_get = posix_get_realtime_coarse,
93601 };
93602- struct k_clock clock_monotonic_coarse = {
93603+ static struct k_clock clock_monotonic_coarse = {
93604 .clock_getres = posix_get_coarse_res,
93605 .clock_get = posix_get_monotonic_coarse,
93606 };
93607- struct k_clock clock_tai = {
93608+ static struct k_clock clock_tai = {
93609 .clock_getres = hrtimer_get_res,
93610 .clock_get = posix_get_tai,
93611 .nsleep = common_nsleep,
93612@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93613 .timer_get = common_timer_get,
93614 .timer_del = common_timer_del,
93615 };
93616- struct k_clock clock_boottime = {
93617+ static struct k_clock clock_boottime = {
93618 .clock_getres = hrtimer_get_res,
93619 .clock_get = posix_get_boottime,
93620 .nsleep = common_nsleep,
93621@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93622 return;
93623 }
93624
93625- posix_clocks[clock_id] = *new_clock;
93626+ posix_clocks[clock_id] = new_clock;
93627 }
93628 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93629
93630@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93631 return (id & CLOCKFD_MASK) == CLOCKFD ?
93632 &clock_posix_dynamic : &clock_posix_cpu;
93633
93634- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93635+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93636 return NULL;
93637- return &posix_clocks[id];
93638+ return posix_clocks[id];
93639 }
93640
93641 static int common_timer_create(struct k_itimer *new_timer)
93642@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93643 struct k_clock *kc = clockid_to_kclock(which_clock);
93644 struct k_itimer *new_timer;
93645 int error, new_timer_id;
93646- sigevent_t event;
93647+ sigevent_t event = { };
93648 int it_id_set = IT_ID_NOT_SET;
93649
93650 if (!kc)
93651@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93652 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93653 return -EFAULT;
93654
93655+	/* only the CLOCK_REALTIME clock can be set; all other clocks
93656+	   have their clock_set fptr set to a nosettime dummy function.
93657+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93658+	   call common_clock_set, which calls do_sys_settimeofday, which
93659+	   we hook.
93660+	*/
93661+
93662 return kc->clock_set(which_clock, &new_tp);
93663 }
93664
93665diff --git a/kernel/time/time.c b/kernel/time/time.c
93666index 2c85b77..6530536 100644
93667--- a/kernel/time/time.c
93668+++ b/kernel/time/time.c
93669@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93670 return error;
93671
93672 if (tz) {
93673+		/* we log in do_settimeofday, called below, so we
93674+		   don't log twice */
93675+ if (!tv)
93676+ gr_log_timechange();
93677+
93678 sys_tz = *tz;
93679 update_vsyscall_tz();
93680 if (firsttime) {
93681diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93682index 6a93185..288c331 100644
93683--- a/kernel/time/timekeeping.c
93684+++ b/kernel/time/timekeeping.c
93685@@ -15,6 +15,7 @@
93686 #include <linux/init.h>
93687 #include <linux/mm.h>
93688 #include <linux/sched.h>
93689+#include <linux/grsecurity.h>
93690 #include <linux/syscore_ops.h>
93691 #include <linux/clocksource.h>
93692 #include <linux/jiffies.h>
93693@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93694 if (!timespec64_valid_strict(ts))
93695 return -EINVAL;
93696
93697+ gr_log_timechange();
93698+
93699 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93700 write_seqcount_begin(&tk_core.seq);
93701
93702diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93703index 2d3f5c5..7ed7dc5 100644
93704--- a/kernel/time/timer.c
93705+++ b/kernel/time/timer.c
93706@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93707 /*
93708 * This function runs timers and the timer-tq in bottom half context.
93709 */
93710-static void run_timer_softirq(struct softirq_action *h)
93711+static __latent_entropy void run_timer_softirq(void)
93712 {
93713 struct tvec_base *base = __this_cpu_read(tvec_bases);
93714
93715@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93716 *
93717 * In all cases the return value is guaranteed to be non-negative.
93718 */
93719-signed long __sched schedule_timeout(signed long timeout)
93720+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93721 {
93722 struct timer_list timer;
93723 unsigned long expire;
93724diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93725index 61ed862..3b52c65 100644
93726--- a/kernel/time/timer_list.c
93727+++ b/kernel/time/timer_list.c
93728@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93729
93730 static void print_name_offset(struct seq_file *m, void *sym)
93731 {
93732+#ifdef CONFIG_GRKERNSEC_HIDESYM
93733+ SEQ_printf(m, "<%p>", NULL);
93734+#else
93735 char symname[KSYM_NAME_LEN];
93736
93737 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93738 SEQ_printf(m, "<%pK>", sym);
93739 else
93740 SEQ_printf(m, "%s", symname);
93741+#endif
93742 }
93743
93744 static void
93745@@ -119,7 +123,11 @@ next_one:
93746 static void
93747 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93748 {
93749+#ifdef CONFIG_GRKERNSEC_HIDESYM
93750+ SEQ_printf(m, " .base: %p\n", NULL);
93751+#else
93752 SEQ_printf(m, " .base: %pK\n", base);
93753+#endif
93754 SEQ_printf(m, " .index: %d\n",
93755 base->index);
93756 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93757@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93758 {
93759 struct proc_dir_entry *pe;
93760
93761+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93762+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93763+#else
93764 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93765+#endif
93766 if (!pe)
93767 return -ENOMEM;
93768 return 0;
93769diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93770index 1fb08f2..ca4bb1e 100644
93771--- a/kernel/time/timer_stats.c
93772+++ b/kernel/time/timer_stats.c
93773@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93774 static unsigned long nr_entries;
93775 static struct entry entries[MAX_ENTRIES];
93776
93777-static atomic_t overflow_count;
93778+static atomic_unchecked_t overflow_count;
93779
93780 /*
93781 * The entries are in a hash-table, for fast lookup:
93782@@ -140,7 +140,7 @@ static void reset_entries(void)
93783 nr_entries = 0;
93784 memset(entries, 0, sizeof(entries));
93785 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93786- atomic_set(&overflow_count, 0);
93787+ atomic_set_unchecked(&overflow_count, 0);
93788 }
93789
93790 static struct entry *alloc_entry(void)
93791@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93792 if (likely(entry))
93793 entry->count++;
93794 else
93795- atomic_inc(&overflow_count);
93796+ atomic_inc_unchecked(&overflow_count);
93797
93798 out_unlock:
93799 raw_spin_unlock_irqrestore(lock, flags);
93800@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93801
93802 static void print_name_offset(struct seq_file *m, unsigned long addr)
93803 {
93804+#ifdef CONFIG_GRKERNSEC_HIDESYM
93805+ seq_printf(m, "<%p>", NULL);
93806+#else
93807 char symname[KSYM_NAME_LEN];
93808
93809 if (lookup_symbol_name(addr, symname) < 0)
93810- seq_printf(m, "<%p>", (void *)addr);
93811+ seq_printf(m, "<%pK>", (void *)addr);
93812 else
93813 seq_printf(m, "%s", symname);
93814+#endif
93815 }
93816
93817 static int tstats_show(struct seq_file *m, void *v)
93818@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93819
93820 seq_puts(m, "Timer Stats Version: v0.3\n");
93821 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93822- if (atomic_read(&overflow_count))
93823- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93824+ if (atomic_read_unchecked(&overflow_count))
93825+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93826 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93827
93828 for (i = 0; i < nr_entries; i++) {
93829@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93830 {
93831 struct proc_dir_entry *pe;
93832
93833+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93834+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93835+#else
93836 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93837+#endif
93838 if (!pe)
93839 return -ENOMEM;
93840 return 0;
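overflow_count above is a pure statistic, so wrap-around is harmless; the _unchecked variants opt such counters out of the PaX REFCOUNT plugin, which would otherwise trap on atomic overflow, and the same substitution recurs in blktrace, ftrace, and the ring buffer below. The idiom in isolation, as a hedged sketch with illustrative names:

static atomic_unchecked_t dropped_stats;

static void note_drop(void)
{
	atomic_inc_unchecked(&dropped_stats);	/* may wrap; that is fine */
}

static int dropped_so_far(void)
{
	return atomic_read_unchecked(&dropped_stats);
}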
93841diff --git a/kernel/torture.c b/kernel/torture.c
93842index dd70993..0bf694b 100644
93843--- a/kernel/torture.c
93844+++ b/kernel/torture.c
93845@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93846 mutex_lock(&fullstop_mutex);
93847 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93848 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93849- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93850+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93851 } else {
93852 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93853 }
93854@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93855 if (!torture_must_stop()) {
93856 if (stutter > 1) {
93857 schedule_timeout_interruptible(stutter - 1);
93858- ACCESS_ONCE(stutter_pause_test) = 2;
93859+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93860 }
93861 schedule_timeout_interruptible(1);
93862- ACCESS_ONCE(stutter_pause_test) = 1;
93863+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93864 }
93865 if (!torture_must_stop())
93866 schedule_timeout_interruptible(stutter);
93867- ACCESS_ONCE(stutter_pause_test) = 0;
93868+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93869 torture_shutdown_absorb("torture_stutter");
93870 } while (!torture_must_stop());
93871 torture_kthread_stopping("torture_stutter");
93872@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93873 schedule_timeout_uninterruptible(10);
93874 return true;
93875 }
93876- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93877+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93878 mutex_unlock(&fullstop_mutex);
93879 torture_shutdown_cleanup();
93880 torture_shuffle_cleanup();
93881diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93882index 483cecf..ac46091 100644
93883--- a/kernel/trace/blktrace.c
93884+++ b/kernel/trace/blktrace.c
93885@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93886 struct blk_trace *bt = filp->private_data;
93887 char buf[16];
93888
93889- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93890+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93891
93892 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93893 }
93894@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93895 return 1;
93896
93897 bt = buf->chan->private_data;
93898- atomic_inc(&bt->dropped);
93899+ atomic_inc_unchecked(&bt->dropped);
93900 return 0;
93901 }
93902
93903@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93904
93905 bt->dir = dir;
93906 bt->dev = dev;
93907- atomic_set(&bt->dropped, 0);
93908+ atomic_set_unchecked(&bt->dropped, 0);
93909 INIT_LIST_HEAD(&bt->running_list);
93910
93911 ret = -EIO;
93912diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93913index af5bffd..57664b8 100644
93914--- a/kernel/trace/ftrace.c
93915+++ b/kernel/trace/ftrace.c
93916@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93917 if (unlikely(ftrace_disabled))
93918 return 0;
93919
93920+ ret = ftrace_arch_code_modify_prepare();
93921+ FTRACE_WARN_ON(ret);
93922+ if (ret)
93923+ return 0;
93924+
93925 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93926+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93927 if (ret) {
93928 ftrace_bug(ret, rec);
93929- return 0;
93930 }
93931- return 1;
93932+ return ret ? 0 : 1;
93933 }
93934
93935 /*
93936@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
93937 if (!count)
93938 return 0;
93939
93940+ pax_open_kernel();
93941 sort(start, count, sizeof(*start),
93942 ftrace_cmp_ips, ftrace_swap_ips);
93943+ pax_close_kernel();
93944
93945 start_pg = ftrace_allocate_pages(count);
93946 if (!start_pg)
93947@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
93948
93949 if (t->ret_stack == NULL) {
93950 atomic_set(&t->tracing_graph_pause, 0);
93951- atomic_set(&t->trace_overrun, 0);
93952+ atomic_set_unchecked(&t->trace_overrun, 0);
93953 t->curr_ret_stack = -1;
93954 /* Make sure the tasks see the -1 first: */
93955 smp_wmb();
93956@@ -5876,7 +5883,7 @@ static void
93957 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
93958 {
93959 atomic_set(&t->tracing_graph_pause, 0);
93960- atomic_set(&t->trace_overrun, 0);
93961+ atomic_set_unchecked(&t->trace_overrun, 0);
93962 t->ftrace_timestamp = 0;
93963 /* make curr_ret_stack visible before we add the ret_stack */
93964 smp_wmb();
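ftrace_code_disable() now wraps the nop patching in the arch prepare/post hooks because kernel text is write-protected in between (mandatory under KERNEXEC rather than best-effort). The bracket reduced to its skeleton, using the names from the hunk:

ret = ftrace_arch_code_modify_prepare();	/* open a text-write window */
if (ret)
	return 0;

ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());	/* re-seal text */

return ret ? 0 : 1;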
93965diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93966index d2e151c..1498973 100644
93967--- a/kernel/trace/ring_buffer.c
93968+++ b/kernel/trace/ring_buffer.c
93969@@ -350,9 +350,9 @@ struct buffer_data_page {
93970 */
93971 struct buffer_page {
93972 struct list_head list; /* list of buffer pages */
93973- local_t write; /* index for next write */
93974+ local_unchecked_t write; /* index for next write */
93975 unsigned read; /* index for next read */
93976- local_t entries; /* entries on this page */
93977+ local_unchecked_t entries; /* entries on this page */
93978 unsigned long real_end; /* real end of data */
93979 struct buffer_data_page *page; /* Actual data page */
93980 };
93981@@ -473,11 +473,11 @@ struct ring_buffer_per_cpu {
93982 unsigned long last_overrun;
93983 local_t entries_bytes;
93984 local_t entries;
93985- local_t overrun;
93986- local_t commit_overrun;
93987- local_t dropped_events;
93988+ local_unchecked_t overrun;
93989+ local_unchecked_t commit_overrun;
93990+ local_unchecked_t dropped_events;
93991 local_t committing;
93992- local_t commits;
93993+ local_unchecked_t commits;
93994 unsigned long read;
93995 unsigned long read_bytes;
93996 u64 write_stamp;
93997@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93998 *
93999 * We add a counter to the write field to denote this.
94000 */
94001- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94002- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94003+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94004+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94005
94006 /*
94007 * Just make sure we have seen our old_write and synchronize
94008@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94009 * cmpxchg to only update if an interrupt did not already
94010 * do it for us. If the cmpxchg fails, we don't care.
94011 */
94012- (void)local_cmpxchg(&next_page->write, old_write, val);
94013- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94014+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94015+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94016
94017 /*
94018 * No need to worry about races with clearing out the commit.
94019@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94020
94021 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94022 {
94023- return local_read(&bpage->entries) & RB_WRITE_MASK;
94024+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94025 }
94026
94027 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94028 {
94029- return local_read(&bpage->write) & RB_WRITE_MASK;
94030+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94031 }
94032
94033 static int
94034@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94035 * bytes consumed in ring buffer from here.
94036 * Increment overrun to account for the lost events.
94037 */
94038- local_add(page_entries, &cpu_buffer->overrun);
94039+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94040 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94041 }
94042
94043@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94044 * it is our responsibility to update
94045 * the counters.
94046 */
94047- local_add(entries, &cpu_buffer->overrun);
94048+ local_add_unchecked(entries, &cpu_buffer->overrun);
94049 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94050
94051 /*
94052@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94053 if (tail == BUF_PAGE_SIZE)
94054 tail_page->real_end = 0;
94055
94056- local_sub(length, &tail_page->write);
94057+ local_sub_unchecked(length, &tail_page->write);
94058 return;
94059 }
94060
94061@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94062 rb_event_set_padding(event);
94063
94064 /* Set the write back to the previous setting */
94065- local_sub(length, &tail_page->write);
94066+ local_sub_unchecked(length, &tail_page->write);
94067 return;
94068 }
94069
94070@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94071
94072 /* Set write to end of buffer */
94073 length = (tail + length) - BUF_PAGE_SIZE;
94074- local_sub(length, &tail_page->write);
94075+ local_sub_unchecked(length, &tail_page->write);
94076 }
94077
94078 /*
94079@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94080 * about it.
94081 */
94082 if (unlikely(next_page == commit_page)) {
94083- local_inc(&cpu_buffer->commit_overrun);
94084+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94085 goto out_reset;
94086 }
94087
94088@@ -2360,7 +2360,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94089 * this is easy, just stop here.
94090 */
94091 if (!(buffer->flags & RB_FL_OVERWRITE)) {
94092- local_inc(&cpu_buffer->dropped_events);
94093+ local_inc_unchecked(&cpu_buffer->dropped_events);
94094 goto out_reset;
94095 }
94096
94097@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94098 cpu_buffer->tail_page) &&
94099 (cpu_buffer->commit_page ==
94100 cpu_buffer->reader_page))) {
94101- local_inc(&cpu_buffer->commit_overrun);
94102+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94103 goto out_reset;
94104 }
94105 }
94106@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94107 length += RB_LEN_TIME_EXTEND;
94108
94109 tail_page = cpu_buffer->tail_page;
94110- write = local_add_return(length, &tail_page->write);
94111+ write = local_add_return_unchecked(length, &tail_page->write);
94112
94113 /* set write to only the index of the write */
94114 write &= RB_WRITE_MASK;
94115@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94116 kmemcheck_annotate_bitfield(event, bitfield);
94117 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94118
94119- local_inc(&tail_page->entries);
94120+ local_inc_unchecked(&tail_page->entries);
94121
94122 /*
94123 * If this is the first commit on the page, then update
94124@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94125
94126 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94127 unsigned long write_mask =
94128- local_read(&bpage->write) & ~RB_WRITE_MASK;
94129+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94130 unsigned long event_length = rb_event_length(event);
94131 /*
94132 * This is on the tail page. It is possible that
94133@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94134 */
94135 old_index += write_mask;
94136 new_index += write_mask;
94137- index = local_cmpxchg(&bpage->write, old_index, new_index);
94138+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94139 if (index == old_index) {
94140 /* update counters */
94141 local_sub(event_length, &cpu_buffer->entries_bytes);
94142@@ -2516,7 +2516,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94143 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
94144 {
94145 local_inc(&cpu_buffer->committing);
94146- local_inc(&cpu_buffer->commits);
94147+ local_inc_unchecked(&cpu_buffer->commits);
94148 }
94149
94150 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94151@@ -2528,7 +2528,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94152 return;
94153
94154 again:
94155- commits = local_read(&cpu_buffer->commits);
94156+ commits = local_read_unchecked(&cpu_buffer->commits);
94157 /* synchronize with interrupts */
94158 barrier();
94159 if (local_read(&cpu_buffer->committing) == 1)
94160@@ -2544,7 +2544,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94161 * updating of the commit page and the clearing of the
94162 * committing counter.
94163 */
94164- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
94165+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
94166 !local_read(&cpu_buffer->committing)) {
94167 local_inc(&cpu_buffer->committing);
94168 goto again;
94169@@ -2574,7 +2574,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
94170 barrier();
94171 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
94172 local_dec(&cpu_buffer->committing);
94173- local_dec(&cpu_buffer->commits);
94174+ local_dec_unchecked(&cpu_buffer->commits);
94175 return NULL;
94176 }
94177 #endif
94178@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94179
94180 /* Do the likely case first */
94181 if (likely(bpage->page == (void *)addr)) {
94182- local_dec(&bpage->entries);
94183+ local_dec_unchecked(&bpage->entries);
94184 return;
94185 }
94186
94187@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94188 start = bpage;
94189 do {
94190 if (bpage->page == (void *)addr) {
94191- local_dec(&bpage->entries);
94192+ local_dec_unchecked(&bpage->entries);
94193 return;
94194 }
94195 rb_inc_page(cpu_buffer, &bpage);
94196@@ -3200,7 +3200,7 @@ static inline unsigned long
94197 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94198 {
94199 return local_read(&cpu_buffer->entries) -
94200- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94201+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94202 }
94203
94204 /**
94205@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94206 return 0;
94207
94208 cpu_buffer = buffer->buffers[cpu];
94209- ret = local_read(&cpu_buffer->overrun);
94210+ ret = local_read_unchecked(&cpu_buffer->overrun);
94211
94212 return ret;
94213 }
94214@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94215 return 0;
94216
94217 cpu_buffer = buffer->buffers[cpu];
94218- ret = local_read(&cpu_buffer->commit_overrun);
94219+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94220
94221 return ret;
94222 }
94223@@ -3334,7 +3334,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
94224 return 0;
94225
94226 cpu_buffer = buffer->buffers[cpu];
94227- ret = local_read(&cpu_buffer->dropped_events);
94228+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
94229
94230 return ret;
94231 }
94232@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94233 /* if you care about this being correct, lock the buffer */
94234 for_each_buffer_cpu(buffer, cpu) {
94235 cpu_buffer = buffer->buffers[cpu];
94236- overruns += local_read(&cpu_buffer->overrun);
94237+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94238 }
94239
94240 return overruns;
94241@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94242 /*
94243 * Reset the reader page to size zero.
94244 */
94245- local_set(&cpu_buffer->reader_page->write, 0);
94246- local_set(&cpu_buffer->reader_page->entries, 0);
94247+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94248+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94249 local_set(&cpu_buffer->reader_page->page->commit, 0);
94250 cpu_buffer->reader_page->real_end = 0;
94251
94252@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94253 * want to compare with the last_overrun.
94254 */
94255 smp_mb();
94256- overwrite = local_read(&(cpu_buffer->overrun));
94257+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94258
94259 /*
94260 * Here's the tricky part.
94261@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94262
94263 cpu_buffer->head_page
94264 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94265- local_set(&cpu_buffer->head_page->write, 0);
94266- local_set(&cpu_buffer->head_page->entries, 0);
94267+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94268+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94269 local_set(&cpu_buffer->head_page->page->commit, 0);
94270
94271 cpu_buffer->head_page->read = 0;
94272@@ -4186,18 +4186,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94273
94274 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94275 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94276- local_set(&cpu_buffer->reader_page->write, 0);
94277- local_set(&cpu_buffer->reader_page->entries, 0);
94278+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94279+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94280 local_set(&cpu_buffer->reader_page->page->commit, 0);
94281 cpu_buffer->reader_page->read = 0;
94282
94283 local_set(&cpu_buffer->entries_bytes, 0);
94284- local_set(&cpu_buffer->overrun, 0);
94285- local_set(&cpu_buffer->commit_overrun, 0);
94286- local_set(&cpu_buffer->dropped_events, 0);
94287+ local_set_unchecked(&cpu_buffer->overrun, 0);
94288+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94289+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
94290 local_set(&cpu_buffer->entries, 0);
94291 local_set(&cpu_buffer->committing, 0);
94292- local_set(&cpu_buffer->commits, 0);
94293+ local_set_unchecked(&cpu_buffer->commits, 0);
94294 cpu_buffer->read = 0;
94295 cpu_buffer->read_bytes = 0;
94296
94297@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94298 rb_init_page(bpage);
94299 bpage = reader->page;
94300 reader->page = *data_page;
94301- local_set(&reader->write, 0);
94302- local_set(&reader->entries, 0);
94303+ local_set_unchecked(&reader->write, 0);
94304+ local_set_unchecked(&reader->entries, 0);
94305 reader->read = 0;
94306 *data_page = bpage;
94307
94308diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94309index 361a827..6a319a3 100644
94310--- a/kernel/trace/trace.c
94311+++ b/kernel/trace/trace.c
94312@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94313 return 0;
94314 }
94315
94316-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94317+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94318 {
94319 /* do nothing if flag is already set */
94320 if (!!(trace_flags & mask) == !!enabled)
94321diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94322index 8de48ba..3e5b4fa 100644
94323--- a/kernel/trace/trace.h
94324+++ b/kernel/trace/trace.h
94325@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94326 void trace_printk_init_buffers(void);
94327 void trace_printk_start_comm(void);
94328 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94329-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94330+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94331
94332 /*
94333 * Normal trace_printk() and friends allocates special buffers
94334diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94335index 57b67b1..66082a9 100644
94336--- a/kernel/trace/trace_clock.c
94337+++ b/kernel/trace/trace_clock.c
94338@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94339 return now;
94340 }
94341
94342-static atomic64_t trace_counter;
94343+static atomic64_unchecked_t trace_counter;
94344
94345 /*
94346 * trace_clock_counter(): simply an atomic counter.
94347@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94348 */
94349 u64 notrace trace_clock_counter(void)
94350 {
94351- return atomic64_add_return(1, &trace_counter);
94352+ return atomic64_inc_return_unchecked(&trace_counter);
94353 }
94354diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94355index b03a0ea..2df3168 100644
94356--- a/kernel/trace/trace_events.c
94357+++ b/kernel/trace/trace_events.c
94358@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94359 return 0;
94360 }
94361
94362-struct ftrace_module_file_ops;
94363 static void __add_event_to_tracers(struct ftrace_event_call *call);
94364
94365 /* Add an additional event_call dynamically */
94366diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94367index ba47600..d0e47fa 100644
94368--- a/kernel/trace/trace_functions_graph.c
94369+++ b/kernel/trace/trace_functions_graph.c
94370@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94371
94372 /* The return trace stack is full */
94373 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94374- atomic_inc(&current->trace_overrun);
94375+ atomic_inc_unchecked(&current->trace_overrun);
94376 return -EBUSY;
94377 }
94378
94379@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94380 *ret = current->ret_stack[index].ret;
94381 trace->func = current->ret_stack[index].func;
94382 trace->calltime = current->ret_stack[index].calltime;
94383- trace->overrun = atomic_read(&current->trace_overrun);
94384+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94385 trace->depth = index;
94386 }
94387
94388diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94389index 7a9ba62..2e0e4a1 100644
94390--- a/kernel/trace/trace_mmiotrace.c
94391+++ b/kernel/trace/trace_mmiotrace.c
94392@@ -24,7 +24,7 @@ struct header_iter {
94393 static struct trace_array *mmio_trace_array;
94394 static bool overrun_detected;
94395 static unsigned long prev_overruns;
94396-static atomic_t dropped_count;
94397+static atomic_unchecked_t dropped_count;
94398
94399 static void mmio_reset_data(struct trace_array *tr)
94400 {
94401@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94402
94403 static unsigned long count_overruns(struct trace_iterator *iter)
94404 {
94405- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94406+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94407 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94408
94409 if (over > prev_overruns)
94410@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94411 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94412 sizeof(*entry), 0, pc);
94413 if (!event) {
94414- atomic_inc(&dropped_count);
94415+ atomic_inc_unchecked(&dropped_count);
94416 return;
94417 }
94418 entry = ring_buffer_event_data(event);
94419@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94420 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94421 sizeof(*entry), 0, pc);
94422 if (!event) {
94423- atomic_inc(&dropped_count);
94424+ atomic_inc_unchecked(&dropped_count);
94425 return;
94426 }
94427 entry = ring_buffer_event_data(event);
94428diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94429index b77b9a6..82f19bd 100644
94430--- a/kernel/trace/trace_output.c
94431+++ b/kernel/trace/trace_output.c
94432@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94433 goto out;
94434 }
94435
94436+ pax_open_kernel();
94437 if (event->funcs->trace == NULL)
94438- event->funcs->trace = trace_nop_print;
94439+ *(void **)&event->funcs->trace = trace_nop_print;
94440 if (event->funcs->raw == NULL)
94441- event->funcs->raw = trace_nop_print;
94442+ *(void **)&event->funcs->raw = trace_nop_print;
94443 if (event->funcs->hex == NULL)
94444- event->funcs->hex = trace_nop_print;
94445+ *(void **)&event->funcs->hex = trace_nop_print;
94446 if (event->funcs->binary == NULL)
94447- event->funcs->binary = trace_nop_print;
94448+ *(void **)&event->funcs->binary = trace_nop_print;
94449+ pax_close_kernel();
94450
94451 key = event->type & (EVENT_HASHSIZE - 1);
94452
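
The pax_open_kernel()/pax_close_kernel() pair above brackets writes to event->funcs, which constification otherwise keeps read-only; the *(void **)& casts exist only to store through const-qualified members. A hedged userspace model of the same open-write-close idea using mprotect() (names are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *ro_data;
static long pagesz;

static void open_kernel_model(void)  { mprotect(ro_data, pagesz, PROT_READ | PROT_WRITE); }
static void close_kernel_model(void) { mprotect(ro_data, pagesz, PROT_READ); }

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	ro_data = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(ro_data, "trace_nop_print");
	mprotect(ro_data, pagesz, PROT_READ);	/* "constified": read-only */

	open_kernel_model();			/* briefly writable */
	strcpy(ro_data, "real_handler");	/* fill in the empty slots */
	close_kernel_model();			/* sealed again */

	puts(ro_data);
	return 0;
}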
94453diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94454index f8b45d8..70ff6c8 100644
94455--- a/kernel/trace/trace_seq.c
94456+++ b/kernel/trace/trace_seq.c
94457@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94458 return 0;
94459 }
94460
94461- seq_buf_path(&s->seq, path, "\n");
94462+ seq_buf_path(&s->seq, path, "\n\\");
94463
94464 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94465 s->seq.len = save_len;
94466diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94467index 16eddb3..758b308 100644
94468--- a/kernel/trace/trace_stack.c
94469+++ b/kernel/trace/trace_stack.c
94470@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94471 return;
94472
94473 /* we do not handle interrupt stacks yet */
94474- if (!object_is_on_stack(stack))
94475+ if (!object_starts_on_stack(stack))
94476 return;
94477
94478 local_irq_save(flags);
94479diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94480index c6ee36f..78513f3 100644
94481--- a/kernel/trace/trace_syscalls.c
94482+++ b/kernel/trace/trace_syscalls.c
94483@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94484 int num;
94485
94486 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94487+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94488+ return -EINVAL;
94489
94490 mutex_lock(&syscall_trace_lock);
94491 if (!sys_perf_refcount_enter)
94492@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94493 int num;
94494
94495 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94496+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94497+ return;
94498
94499 mutex_lock(&syscall_trace_lock);
94500 sys_perf_refcount_enter--;
94501@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94502 int num;
94503
94504 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94505+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94506+ return -EINVAL;
94507
94508 mutex_lock(&syscall_trace_lock);
94509 if (!sys_perf_refcount_exit)
94510@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94511 int num;
94512
94513 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94514+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94515+ return;
94516
94517 mutex_lock(&syscall_trace_lock);
94518 sys_perf_refcount_exit--;
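
The four hunks above add the same guard: syscall_nr comes from event metadata and is about to index fixed-size refcount and bitmap tables, so an out-of-range value is rejected up front. A minimal sketch of the pattern, where NR_SYSCALLS_MODEL stands in for NR_syscalls:

#include <stdio.h>

#define NR_SYSCALLS_MODEL 64
static int refcount_table[NR_SYSCALLS_MODEL];

static int enable_event_model(int num)
{
	if (num < 0 || num >= NR_SYSCALLS_MODEL) {	/* WARN_ON_ONCE() analogue */
		fprintf(stderr, "bogus syscall nr %d\n", num);
		return -1;				/* -EINVAL in the patch */
	}
	refcount_table[num]++;
	return 0;
}

int main(void)
{
	enable_event_model(3);		/* ok */
	enable_event_model(-1);		/* rejected instead of corrupting memory */
	return 0;
}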
94519diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94520index 4109f83..fe1f830 100644
94521--- a/kernel/user_namespace.c
94522+++ b/kernel/user_namespace.c
94523@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94524 !kgid_has_mapping(parent_ns, group))
94525 return -EPERM;
94526
94527+#ifdef CONFIG_GRKERNSEC
94528+ /*
94529+ * This doesn't really inspire confidence:
94530+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94531+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94532+ * Increases kernel attack surface in areas developers
94533+ * previously cared little about ("low importance due
94534+	 * to requiring 'root' capability")
94535+ * To be removed when this code receives *proper* review
94536+ */
94537+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94538+ !capable(CAP_SETGID))
94539+ return -EPERM;
94540+#endif
94541+
94542 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94543 if (!ns)
94544 return -ENOMEM;
94545@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94546 if (atomic_read(&current->mm->mm_users) > 1)
94547 return -EINVAL;
94548
94549- if (current->fs->users != 1)
94550+ if (atomic_read(&current->fs->users) != 1)
94551 return -EINVAL;
94552
94553 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94554diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94555index c8eac43..4b5f08f 100644
94556--- a/kernel/utsname_sysctl.c
94557+++ b/kernel/utsname_sysctl.c
94558@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94559 static int proc_do_uts_string(struct ctl_table *table, int write,
94560 void __user *buffer, size_t *lenp, loff_t *ppos)
94561 {
94562- struct ctl_table uts_table;
94563+ ctl_table_no_const uts_table;
94564 int r;
94565 memcpy(&uts_table, table, sizeof(uts_table));
94566 uts_table.data = get_uts(table, write);
94567diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94568index 70bf118..4be3c37 100644
94569--- a/kernel/watchdog.c
94570+++ b/kernel/watchdog.c
94571@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94572 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94573 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94574
94575-static struct smp_hotplug_thread watchdog_threads = {
94576+static struct smp_hotplug_thread watchdog_threads __read_only = {
94577 .store = &softlockup_watchdog,
94578 .thread_should_run = watchdog_should_run,
94579 .thread_fn = watchdog,
94580diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94581index 82d0c8d..37f4222 100644
94582--- a/kernel/workqueue.c
94583+++ b/kernel/workqueue.c
94584@@ -4565,7 +4565,7 @@ static void rebind_workers(struct worker_pool *pool)
94585 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94586 worker_flags |= WORKER_REBOUND;
94587 worker_flags &= ~WORKER_UNBOUND;
94588- ACCESS_ONCE(worker->flags) = worker_flags;
94589+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94590 }
94591
94592 spin_unlock_irq(&pool->lock);
94593diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94594index 5f2ce61..85a0b1b 100644
94595--- a/lib/Kconfig.debug
94596+++ b/lib/Kconfig.debug
94597@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94598
94599 config DEBUG_WW_MUTEX_SLOWPATH
94600 bool "Wait/wound mutex debugging: Slowpath testing"
94601- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94602+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94603 select DEBUG_LOCK_ALLOC
94604 select DEBUG_SPINLOCK
94605 select DEBUG_MUTEXES
94606@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94607
94608 config DEBUG_LOCK_ALLOC
94609 bool "Lock debugging: detect incorrect freeing of live locks"
94610- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94611+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94612 select DEBUG_SPINLOCK
94613 select DEBUG_MUTEXES
94614 select LOCKDEP
94615@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94616
94617 config PROVE_LOCKING
94618 bool "Lock debugging: prove locking correctness"
94619- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94620+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94621 select LOCKDEP
94622 select DEBUG_SPINLOCK
94623 select DEBUG_MUTEXES
94624@@ -992,7 +992,7 @@ config LOCKDEP
94625
94626 config LOCK_STAT
94627 bool "Lock usage statistics"
94628- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94629+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94630 select LOCKDEP
94631 select DEBUG_SPINLOCK
94632 select DEBUG_MUTEXES
94633@@ -1453,6 +1453,7 @@ config LATENCYTOP
94634 depends on DEBUG_KERNEL
94635 depends on STACKTRACE_SUPPORT
94636 depends on PROC_FS
94637+ depends on !GRKERNSEC_HIDESYM
94638 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94639 select KALLSYMS
94640 select KALLSYMS_ALL
94641@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94642 config DEBUG_STRICT_USER_COPY_CHECKS
94643 bool "Strict user copy size checks"
94644 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94645- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94646+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94647 help
94648 Enabling this option turns a certain set of sanity checks for user
94649 copy operations into compile time failures.
94650@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94651
94652 config PROVIDE_OHCI1394_DMA_INIT
94653 bool "Remote debugging over FireWire early on boot"
94654- depends on PCI && X86
94655+ depends on PCI && X86 && !GRKERNSEC
94656 help
94657 If you want to debug problems which hang or crash the kernel early
94658 on boot and the crashing machine has a FireWire port, you can use
94659diff --git a/lib/Makefile b/lib/Makefile
94660index 3c3b30b..ca29102 100644
94661--- a/lib/Makefile
94662+++ b/lib/Makefile
94663@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94664 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94665 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94666 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94667-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94668+obj-y += list_debug.o
94669 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94670
94671 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94672diff --git a/lib/average.c b/lib/average.c
94673index 114d1be..ab0350c 100644
94674--- a/lib/average.c
94675+++ b/lib/average.c
94676@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94677 {
94678 unsigned long internal = ACCESS_ONCE(avg->internal);
94679
94680- ACCESS_ONCE(avg->internal) = internal ?
94681+ ACCESS_ONCE_RW(avg->internal) = internal ?
94682 (((internal << avg->weight) - internal) +
94683 (val << avg->factor)) >> avg->weight :
94684 (val << avg->factor);
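
ACCESS_ONCE_RW appears wherever the hardened tree makes plain ACCESS_ONCE a const-qualified read, so stores need the writable variant. Roughly, the two macros differ only in constness; an illustrative model, not the kernel's exact definitions:

#include <stdio.h>

#define ACCESS_ONCE_MODEL(x)	(*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW_MODEL(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned long internal = 0;

	ACCESS_ONCE_RW_MODEL(internal) = 42;		/* store side */
	printf("%lu\n", ACCESS_ONCE_MODEL(internal));	/* load side */
	return 0;
}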
94685diff --git a/lib/bitmap.c b/lib/bitmap.c
94686index 324ea9e..46b1ae2 100644
94687--- a/lib/bitmap.c
94688+++ b/lib/bitmap.c
94689@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94690 }
94691 EXPORT_SYMBOL(__bitmap_subset);
94692
94693-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94694+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94695 {
94696 unsigned int k, lim = bits/BITS_PER_LONG;
94697 int w = 0;
94698@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94699 {
94700 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94701 u32 chunk;
94702- const char __user __force *ubuf = (const char __user __force *)buf;
94703+ const char __user *ubuf = (const char __force_user *)buf;
94704
94705 bitmap_zero(maskp, nmaskbits);
94706
94707@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94708 {
94709 if (!access_ok(VERIFY_READ, ubuf, ulen))
94710 return -EFAULT;
94711- return __bitmap_parse((const char __force *)ubuf,
94712+ return __bitmap_parse((const char __force_kernel *)ubuf,
94713 ulen, 1, maskp, nmaskbits);
94714
94715 }
94716@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94717 {
94718 unsigned a, b;
94719 int c, old_c, totaldigits;
94720- const char __user __force *ubuf = (const char __user __force *)buf;
94721+ const char __user *ubuf = (const char __force_user *)buf;
94722 int exp_digit, in_range;
94723
94724 totaldigits = c = 0;
94725@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94726 {
94727 if (!access_ok(VERIFY_READ, ubuf, ulen))
94728 return -EFAULT;
94729- return __bitmap_parselist((const char __force *)ubuf,
94730+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94731 ulen, 1, maskp, nmaskbits);
94732 }
94733 EXPORT_SYMBOL(bitmap_parselist_user);
94734diff --git a/lib/bug.c b/lib/bug.c
94735index 0c3bd95..5a615a1 100644
94736--- a/lib/bug.c
94737+++ b/lib/bug.c
94738@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94739 return BUG_TRAP_TYPE_NONE;
94740
94741 bug = find_bug(bugaddr);
94742+ if (!bug)
94743+ return BUG_TRAP_TYPE_NONE;
94744
94745 file = NULL;
94746 line = 0;
94747diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94748index 547f7f9..a6d4ba0 100644
94749--- a/lib/debugobjects.c
94750+++ b/lib/debugobjects.c
94751@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94752 if (limit > 4)
94753 return;
94754
94755- is_on_stack = object_is_on_stack(addr);
94756+ is_on_stack = object_starts_on_stack(addr);
94757 if (is_on_stack == onstack)
94758 return;
94759
94760diff --git a/lib/div64.c b/lib/div64.c
94761index 4382ad7..08aa558 100644
94762--- a/lib/div64.c
94763+++ b/lib/div64.c
94764@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94765 EXPORT_SYMBOL(__div64_32);
94766
94767 #ifndef div_s64_rem
94768-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94769+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94770 {
94771 u64 quotient;
94772
94773@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94774 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94775 */
94776 #ifndef div64_u64
94777-u64 div64_u64(u64 dividend, u64 divisor)
94778+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94779 {
94780 u32 high = divisor >> 32;
94781 u64 quot;
94782diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94783index 9722bd2..0d826f4 100644
94784--- a/lib/dma-debug.c
94785+++ b/lib/dma-debug.c
94786@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94787
94788 void dma_debug_add_bus(struct bus_type *bus)
94789 {
94790- struct notifier_block *nb;
94791+ notifier_block_no_const *nb;
94792
94793 if (dma_debug_disabled())
94794 return;
94795@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94796
94797 static void check_for_stack(struct device *dev, void *addr)
94798 {
94799- if (object_is_on_stack(addr))
94800+ if (object_starts_on_stack(addr))
94801 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94802 "stack [addr=%p]\n", addr);
94803 }
94804diff --git a/lib/inflate.c b/lib/inflate.c
94805index 013a761..c28f3fc 100644
94806--- a/lib/inflate.c
94807+++ b/lib/inflate.c
94808@@ -269,7 +269,7 @@ static void free(void *where)
94809 malloc_ptr = free_mem_ptr;
94810 }
94811 #else
94812-#define malloc(a) kmalloc(a, GFP_KERNEL)
94813+#define malloc(a) kmalloc((a), GFP_KERNEL)
94814 #define free(a) kfree(a)
94815 #endif
94816
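
The lib/inflate.c hunk only adds parentheses around the macro argument. This is defensive macro hygiene (the kmalloc case itself appears benign either way): an unparenthesized argument can bind wrongly once the expansion participates in a larger expression. A tiny demonstration of the general hazard:

#include <stdio.h>

#define DOUBLE_BAD(a)	a * 2
#define DOUBLE_GOOD(a)	((a) * 2)

int main(void)
{
	printf("%d\n", DOUBLE_BAD(1 + 2));	/* 1 + 2*2 = 5, surprise */
	printf("%d\n", DOUBLE_GOOD(1 + 2));	/* (1+2)*2 = 6, intended */
	return 0;
}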
94817diff --git a/lib/ioremap.c b/lib/ioremap.c
94818index 0c9216c..863bd89 100644
94819--- a/lib/ioremap.c
94820+++ b/lib/ioremap.c
94821@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94822 unsigned long next;
94823
94824 phys_addr -= addr;
94825- pmd = pmd_alloc(&init_mm, pud, addr);
94826+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94827 if (!pmd)
94828 return -ENOMEM;
94829 do {
94830@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94831 unsigned long next;
94832
94833 phys_addr -= addr;
94834- pud = pud_alloc(&init_mm, pgd, addr);
94835+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94836 if (!pud)
94837 return -ENOMEM;
94838 do {
94839diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94840index bd2bea9..6b3c95e 100644
94841--- a/lib/is_single_threaded.c
94842+++ b/lib/is_single_threaded.c
94843@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94844 struct task_struct *p, *t;
94845 bool ret;
94846
94847+ if (!mm)
94848+ return true;
94849+
94850 if (atomic_read(&task->signal->live) != 1)
94851 return false;
94852
94853diff --git a/lib/kobject.c b/lib/kobject.c
94854index 03d4ab3..46f6374 100644
94855--- a/lib/kobject.c
94856+++ b/lib/kobject.c
94857@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94858
94859
94860 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94861-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94862+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94863
94864-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94865+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94866 {
94867 enum kobj_ns_type type = ops->type;
94868 int error;
94869diff --git a/lib/list_debug.c b/lib/list_debug.c
94870index c24c2f7..f0296f4 100644
94871--- a/lib/list_debug.c
94872+++ b/lib/list_debug.c
94873@@ -11,7 +11,9 @@
94874 #include <linux/bug.h>
94875 #include <linux/kernel.h>
94876 #include <linux/rculist.h>
94877+#include <linux/mm.h>
94878
94879+#ifdef CONFIG_DEBUG_LIST
94880 /*
94881 * Insert a new entry between two known consecutive entries.
94882 *
94883@@ -19,21 +21,40 @@
94884 * the prev/next entries already!
94885 */
94886
94887+static bool __list_add_debug(struct list_head *new,
94888+ struct list_head *prev,
94889+ struct list_head *next)
94890+{
94891+ if (unlikely(next->prev != prev)) {
94892+ printk(KERN_ERR "list_add corruption. next->prev should be "
94893+ "prev (%p), but was %p. (next=%p).\n",
94894+ prev, next->prev, next);
94895+ BUG();
94896+ return false;
94897+ }
94898+ if (unlikely(prev->next != next)) {
94899+ printk(KERN_ERR "list_add corruption. prev->next should be "
94900+ "next (%p), but was %p. (prev=%p).\n",
94901+ next, prev->next, prev);
94902+ BUG();
94903+ return false;
94904+ }
94905+ if (unlikely(new == prev || new == next)) {
94906+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94907+ new, prev, next);
94908+ BUG();
94909+ return false;
94910+ }
94911+ return true;
94912+}
94913+
94914 void __list_add(struct list_head *new,
94915- struct list_head *prev,
94916- struct list_head *next)
94917+ struct list_head *prev,
94918+ struct list_head *next)
94919 {
94920- WARN(next->prev != prev,
94921- "list_add corruption. next->prev should be "
94922- "prev (%p), but was %p. (next=%p).\n",
94923- prev, next->prev, next);
94924- WARN(prev->next != next,
94925- "list_add corruption. prev->next should be "
94926- "next (%p), but was %p. (prev=%p).\n",
94927- next, prev->next, prev);
94928- WARN(new == prev || new == next,
94929- "list_add double add: new=%p, prev=%p, next=%p.\n",
94930- new, prev, next);
94931+ if (!__list_add_debug(new, prev, next))
94932+ return;
94933+
94934 next->prev = new;
94935 new->next = next;
94936 new->prev = prev;
94937@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94938 }
94939 EXPORT_SYMBOL(__list_add);
94940
94941-void __list_del_entry(struct list_head *entry)
94942+static bool __list_del_entry_debug(struct list_head *entry)
94943 {
94944 struct list_head *prev, *next;
94945
94946 prev = entry->prev;
94947 next = entry->next;
94948
94949- if (WARN(next == LIST_POISON1,
94950- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94951- entry, LIST_POISON1) ||
94952- WARN(prev == LIST_POISON2,
94953- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94954- entry, LIST_POISON2) ||
94955- WARN(prev->next != entry,
94956- "list_del corruption. prev->next should be %p, "
94957- "but was %p\n", entry, prev->next) ||
94958- WARN(next->prev != entry,
94959- "list_del corruption. next->prev should be %p, "
94960- "but was %p\n", entry, next->prev))
94961+ if (unlikely(next == LIST_POISON1)) {
94962+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94963+ entry, LIST_POISON1);
94964+ BUG();
94965+ return false;
94966+ }
94967+ if (unlikely(prev == LIST_POISON2)) {
94968+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94969+ entry, LIST_POISON2);
94970+ BUG();
94971+ return false;
94972+ }
94973+ if (unlikely(entry->prev->next != entry)) {
94974+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94975+ "but was %p\n", entry, prev->next);
94976+ BUG();
94977+ return false;
94978+ }
94979+ if (unlikely(entry->next->prev != entry)) {
94980+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94981+ "but was %p\n", entry, next->prev);
94982+ BUG();
94983+ return false;
94984+ }
94985+ return true;
94986+}
94987+
94988+void __list_del_entry(struct list_head *entry)
94989+{
94990+ if (!__list_del_entry_debug(entry))
94991 return;
94992
94993- __list_del(prev, next);
94994+ __list_del(entry->prev, entry->next);
94995 }
94996 EXPORT_SYMBOL(__list_del_entry);
94997
94998@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94999 void __list_add_rcu(struct list_head *new,
95000 struct list_head *prev, struct list_head *next)
95001 {
95002- WARN(next->prev != prev,
95003- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95004- prev, next->prev, next);
95005- WARN(prev->next != next,
95006- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95007- next, prev->next, prev);
95008+ if (!__list_add_debug(new, prev, next))
95009+ return;
95010+
95011 new->next = next;
95012 new->prev = prev;
95013 rcu_assign_pointer(list_next_rcu(prev), new);
95014 next->prev = new;
95015 }
95016 EXPORT_SYMBOL(__list_add_rcu);
95017+#endif
95018+
95019+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95020+{
95021+#ifdef CONFIG_DEBUG_LIST
95022+ if (!__list_add_debug(new, prev, next))
95023+ return;
95024+#endif
95025+
95026+ pax_open_kernel();
95027+ next->prev = new;
95028+ new->next = next;
95029+ new->prev = prev;
95030+ prev->next = new;
95031+ pax_close_kernel();
95032+}
95033+EXPORT_SYMBOL(__pax_list_add);
95034+
95035+void pax_list_del(struct list_head *entry)
95036+{
95037+#ifdef CONFIG_DEBUG_LIST
95038+ if (!__list_del_entry_debug(entry))
95039+ return;
95040+#endif
95041+
95042+ pax_open_kernel();
95043+ __list_del(entry->prev, entry->next);
95044+ entry->next = LIST_POISON1;
95045+ entry->prev = LIST_POISON2;
95046+ pax_close_kernel();
95047+}
95048+EXPORT_SYMBOL(pax_list_del);
95049+
95050+void pax_list_del_init(struct list_head *entry)
95051+{
95052+ pax_open_kernel();
95053+ __list_del(entry->prev, entry->next);
95054+ INIT_LIST_HEAD(entry);
95055+ pax_close_kernel();
95056+}
95057+EXPORT_SYMBOL(pax_list_del_init);
95058+
95059+void __pax_list_add_rcu(struct list_head *new,
95060+ struct list_head *prev, struct list_head *next)
95061+{
95062+#ifdef CONFIG_DEBUG_LIST
95063+ if (!__list_add_debug(new, prev, next))
95064+ return;
95065+#endif
95066+
95067+ pax_open_kernel();
95068+ new->next = next;
95069+ new->prev = prev;
95070+ rcu_assign_pointer(list_next_rcu(prev), new);
95071+ next->prev = new;
95072+ pax_close_kernel();
95073+}
95074+EXPORT_SYMBOL(__pax_list_add_rcu);
95075+
95076+void pax_list_del_rcu(struct list_head *entry)
95077+{
95078+#ifdef CONFIG_DEBUG_LIST
95079+ if (!__list_del_entry_debug(entry))
95080+ return;
95081+#endif
95082+
95083+ pax_open_kernel();
95084+ __list_del(entry->prev, entry->next);
95085+ entry->next = LIST_POISON1;
95086+ entry->prev = LIST_POISON2;
95087+ pax_close_kernel();
95088+}
95089+EXPORT_SYMBOL(pax_list_del_rcu);
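
The rewritten list_debug.c turns the WARN-and-continue checks into helpers that BUG() and refuse the operation outright, and the new pax_list_* entry points perform the same writes inside pax_open_kernel()/pax_close_kernel() so even read-only lists can be maintained. A minimal fail-closed sketch of the add path (userspace model):

#include <stdio.h>
#include <stdlib.h>

struct list_node { struct list_node *next, *prev; };

static void list_add_checked(struct list_node *new,
			     struct list_node *prev, struct list_node *next)
{
	if (next->prev != prev || prev->next != next ||
	    new == prev || new == next) {
		fprintf(stderr, "list corruption detected, aborting\n");
		abort();		/* the patch BUG()s here */
	}
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_node head = { &head, &head }, a;

	list_add_checked(&a, &head, head.next);
	printf("inserted ok\n");
	return 0;
}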
95090diff --git a/lib/lockref.c b/lib/lockref.c
95091index d2233de..fa1a2f6 100644
95092--- a/lib/lockref.c
95093+++ b/lib/lockref.c
95094@@ -48,13 +48,13 @@
95095 void lockref_get(struct lockref *lockref)
95096 {
95097 CMPXCHG_LOOP(
95098- new.count++;
95099+ __lockref_inc(&new);
95100 ,
95101 return;
95102 );
95103
95104 spin_lock(&lockref->lock);
95105- lockref->count++;
95106+ __lockref_inc(lockref);
95107 spin_unlock(&lockref->lock);
95108 }
95109 EXPORT_SYMBOL(lockref_get);
95110@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95111 int retval;
95112
95113 CMPXCHG_LOOP(
95114- new.count++;
95115+ __lockref_inc(&new);
95116 if (!old.count)
95117 return 0;
95118 ,
95119@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95120 spin_lock(&lockref->lock);
95121 retval = 0;
95122 if (lockref->count) {
95123- lockref->count++;
95124+ __lockref_inc(lockref);
95125 retval = 1;
95126 }
95127 spin_unlock(&lockref->lock);
95128@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95129 int lockref_get_or_lock(struct lockref *lockref)
95130 {
95131 CMPXCHG_LOOP(
95132- new.count++;
95133+ __lockref_inc(&new);
95134 if (!old.count)
95135 break;
95136 ,
95137@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95138 spin_lock(&lockref->lock);
95139 if (!lockref->count)
95140 return 0;
95141- lockref->count++;
95142+ __lockref_inc(lockref);
95143 spin_unlock(&lockref->lock);
95144 return 1;
95145 }
95146@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95147 int lockref_put_or_lock(struct lockref *lockref)
95148 {
95149 CMPXCHG_LOOP(
95150- new.count--;
95151+ __lockref_dec(&new);
95152 if (old.count <= 1)
95153 break;
95154 ,
95155@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95156 spin_lock(&lockref->lock);
95157 if (lockref->count <= 1)
95158 return 0;
95159- lockref->count--;
95160+ __lockref_dec(lockref);
95161 spin_unlock(&lockref->lock);
95162 return 1;
95163 }
95164@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95165 int retval;
95166
95167 CMPXCHG_LOOP(
95168- new.count++;
95169+ __lockref_inc(&new);
95170 if ((int)old.count < 0)
95171 return 0;
95172 ,
95173@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95174 spin_lock(&lockref->lock);
95175 retval = 0;
95176 if ((int) lockref->count >= 0) {
95177- lockref->count++;
95178+ __lockref_inc(lockref);
95179 retval = 1;
95180 }
95181 spin_unlock(&lockref->lock);
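
lockref's raw count++/count-- updates are routed through __lockref_inc()/__lockref_dec() so a hardened build can funnel every count change through one checked helper. A sketch of what such a helper can enforce (hypothetical names; the real macros operate on the lockref's embedded count):

#include <limits.h>
#include <stdio.h>

struct lockref_model { int count; };

static void lockref_inc_model(struct lockref_model *l)
{
	if (l->count == INT_MAX)	/* would wrap: treat as a refcount bug */
		__builtin_trap();
	l->count++;
}

int main(void)
{
	struct lockref_model l = { 1 };

	lockref_inc_model(&l);
	printf("count=%d\n", l.count);
	return 0;
}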
95182diff --git a/lib/nlattr.c b/lib/nlattr.c
95183index 9c3e85f..0affd1b 100644
95184--- a/lib/nlattr.c
95185+++ b/lib/nlattr.c
95186@@ -279,7 +279,11 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
95187 {
95188 int minlen = min_t(int, count, nla_len(src));
95189
95190+ BUG_ON(minlen < 0);
95191+
95192 memcpy(dest, nla_data(src), minlen);
95193+ if (count > minlen)
95194+ memset(dest + minlen, 0, count - minlen);
95195
95196 return minlen;
95197 }
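
The nla_memcpy() hunk zero-fills the destination tail when the attribute is shorter than the caller's buffer, so stale bytes are never handed back. The pattern in isolation:

#include <stdio.h>
#include <string.h>

static int bounded_copy_model(void *dest, const void *src,
			      int src_len, int count)
{
	int minlen = src_len < count ? src_len : count;

	memcpy(dest, src, minlen);
	if (count > minlen)		/* the added hunk */
		memset((char *)dest + minlen, 0, count - minlen);
	return minlen;
}

int main(void)
{
	char buf[8] = "XXXXXXX";	/* pretend this is stale data */

	bounded_copy_model(buf, "ab", 2, sizeof(buf));
	printf("%d\n", buf[5]);		/* 0, not leftover 'X' */
	return 0;
}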
95198diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95199index 6111bcb..02e816b 100644
95200--- a/lib/percpu-refcount.c
95201+++ b/lib/percpu-refcount.c
95202@@ -31,7 +31,7 @@
95203 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95204 */
95205
95206-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95207+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95208
95209 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95210
95211diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95212index 3291a8e..346a91e 100644
95213--- a/lib/radix-tree.c
95214+++ b/lib/radix-tree.c
95215@@ -67,7 +67,7 @@ struct radix_tree_preload {
95216 int nr;
95217 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95218 };
95219-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95220+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95221
95222 static inline void *ptr_to_indirect(void *ptr)
95223 {
95224diff --git a/lib/random32.c b/lib/random32.c
95225index 0bee183..526f12f 100644
95226--- a/lib/random32.c
95227+++ b/lib/random32.c
95228@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95229 }
95230 #endif
95231
95232-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95233+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95234
95235 /**
95236 * prandom_u32_state - seeded pseudo-random number generator.
95237diff --git a/lib/rbtree.c b/lib/rbtree.c
95238index c16c81a..4dcbda1 100644
95239--- a/lib/rbtree.c
95240+++ b/lib/rbtree.c
95241@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95242 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95243
95244 static const struct rb_augment_callbacks dummy_callbacks = {
95245- dummy_propagate, dummy_copy, dummy_rotate
95246+ .propagate = dummy_propagate,
95247+ .copy = dummy_copy,
95248+ .rotate = dummy_rotate
95249 };
95250
95251 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95252diff --git a/lib/show_mem.c b/lib/show_mem.c
95253index 7de89f4..00d70b7 100644
95254--- a/lib/show_mem.c
95255+++ b/lib/show_mem.c
95256@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95257 quicklist_total_size());
95258 #endif
95259 #ifdef CONFIG_MEMORY_FAILURE
95260- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95261+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95262 #endif
95263 }
95264diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95265index bb2b201..46abaf9 100644
95266--- a/lib/strncpy_from_user.c
95267+++ b/lib/strncpy_from_user.c
95268@@ -21,7 +21,7 @@
95269 */
95270 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95271 {
95272- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95273+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95274 long res = 0;
95275
95276 /*
95277diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95278index a28df52..3d55877 100644
95279--- a/lib/strnlen_user.c
95280+++ b/lib/strnlen_user.c
95281@@ -26,7 +26,7 @@
95282 */
95283 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95284 {
95285- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95286+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95287 long align, res = 0;
95288 unsigned long c;
95289
95290diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95291index 4abda07..b9d3765 100644
95292--- a/lib/swiotlb.c
95293+++ b/lib/swiotlb.c
95294@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95295
95296 void
95297 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95298- dma_addr_t dev_addr)
95299+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95300 {
95301 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95302
95303diff --git a/lib/usercopy.c b/lib/usercopy.c
95304index 4f5b1dd..7cab418 100644
95305--- a/lib/usercopy.c
95306+++ b/lib/usercopy.c
95307@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95308 WARN(1, "Buffer overflow detected!\n");
95309 }
95310 EXPORT_SYMBOL(copy_from_user_overflow);
95311+
95312+void copy_to_user_overflow(void)
95313+{
95314+ WARN(1, "Buffer overflow detected!\n");
95315+}
95316+EXPORT_SYMBOL(copy_to_user_overflow);
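
The new copy_to_user_overflow() mirrors copy_from_user_overflow(): both are reporting stubs that size-checking instrumentation diverts to when a copy would overrun its destination. A hedged sketch of how a checked copy might call such a stub (the real checks are generated by compiler plugins and builtins, not open-coded like this):

#include <stdio.h>
#include <string.h>

static void copy_to_user_overflow_model(void)
{
	fprintf(stderr, "Buffer overflow detected!\n");
}

/* stand-in for the generated check: compare a known destination size
 * against the requested length before copying */
static void checked_copy_model(void *dst, size_t dst_size,
			       const void *src, size_t n)
{
	if (n > dst_size) {
		copy_to_user_overflow_model();
		return;
	}
	memcpy(dst, src, n);
}

int main(void)
{
	char small[4];

	checked_copy_model(small, sizeof(small), "abc", 4);	/* fits */
	checked_copy_model(small, sizeof(small), "abcdefg", 8);	/* reported */
	return 0;
}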
95317diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95318index ec337f6..8484eb2 100644
95319--- a/lib/vsprintf.c
95320+++ b/lib/vsprintf.c
95321@@ -16,6 +16,9 @@
95322 * - scnprintf and vscnprintf
95323 */
95324
95325+#ifdef CONFIG_GRKERNSEC_HIDESYM
95326+#define __INCLUDED_BY_HIDESYM 1
95327+#endif
95328 #include <stdarg.h>
95329 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95330 #include <linux/types.h>
95331@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95332 #ifdef CONFIG_KALLSYMS
95333 if (*fmt == 'B')
95334 sprint_backtrace(sym, value);
95335- else if (*fmt != 'f' && *fmt != 's')
95336+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95337 sprint_symbol(sym, value);
95338 else
95339 sprint_symbol_no_offset(sym, value);
95340@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95341 return number(buf, end, num, spec);
95342 }
95343
95344+#ifdef CONFIG_GRKERNSEC_HIDESYM
95345+int kptr_restrict __read_mostly = 2;
95346+#else
95347 int kptr_restrict __read_mostly;
95348+#endif
95349
95350 /*
95351 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95352@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95353 *
95354 * - 'F' For symbolic function descriptor pointers with offset
95355 * - 'f' For simple symbolic function names without offset
95356+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95357 * - 'S' For symbolic direct pointers with offset
95358 * - 's' For symbolic direct pointers without offset
95359+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95360 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95361 * - 'B' For backtraced symbolic direct pointers with offset
95362 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95363@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95364
95365 if (!ptr && *fmt != 'K') {
95366 /*
95367- * Print (null) with the same width as a pointer so it makes
95368+ * Print (nil) with the same width as a pointer so it makes
95369 * tabular output look nice.
95370 */
95371 if (spec.field_width == -1)
95372 spec.field_width = default_width;
95373- return string(buf, end, "(null)", spec);
95374+ return string(buf, end, "(nil)", spec);
95375 }
95376
95377 switch (*fmt) {
95378@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95379 /* Fallthrough */
95380 case 'S':
95381 case 's':
95382+#ifdef CONFIG_GRKERNSEC_HIDESYM
95383+ break;
95384+#else
95385+ return symbol_string(buf, end, ptr, spec, fmt);
95386+#endif
95387+ case 'X':
95388+ ptr = dereference_function_descriptor(ptr);
95389+ case 'A':
95390 case 'B':
95391 return symbol_string(buf, end, ptr, spec, fmt);
95392 case 'R':
95393@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95394 va_end(va);
95395 return buf;
95396 }
95397+ case 'P':
95398+ break;
95399 case 'K':
95400 /*
95401 * %pK cannot be used in IRQ context because its test
95402@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95403 ((const struct file *)ptr)->f_path.dentry,
95404 spec, fmt);
95405 }
95406+
95407+#ifdef CONFIG_GRKERNSEC_HIDESYM
95408+ /* 'P' = approved pointers to copy to userland,
95409+	   as in the /proc/kallsyms case, where we display nothing
95410+	   to non-root users and the real contents to root users.
95411+	   'X' = approved simple symbols.
95412+	   Also ignore 'K' pointers, since we force their NULLing
95413+	   for non-root users above.
95414+ */
95415+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95416+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95417+ dump_stack();
95418+ ptr = NULL;
95419+ }
95420+#endif
95421+
95422 spec.flags |= SMALL;
95423 if (spec.field_width == -1) {
95424 spec.field_width = default_width;
95425@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95426 typeof(type) value; \
95427 if (sizeof(type) == 8) { \
95428 args = PTR_ALIGN(args, sizeof(u32)); \
95429- *(u32 *)&value = *(u32 *)args; \
95430- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95431+ *(u32 *)&value = *(const u32 *)args; \
95432+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95433 } else { \
95434 args = PTR_ALIGN(args, sizeof(type)); \
95435- value = *(typeof(type) *)args; \
95436+ value = *(const typeof(type) *)args; \
95437 } \
95438 args += sizeof(type); \
95439 value; \
95440@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95441 case FORMAT_TYPE_STR: {
95442 const char *str_arg = args;
95443 args += strlen(str_arg) + 1;
95444- str = string(str, end, (char *)str_arg, spec);
95445+ str = string(str, end, str_arg, spec);
95446 break;
95447 }
95448
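
Most of the vsprintf.c changes gate %p output under GRKERNSEC_HIDESYM: kernel addresses print as nothing unless the format was explicitly approved ('P', 'X', 'A'), and kptr_restrict defaults to 2. A small userspace sketch of the redaction decision, assuming a 64-bit layout (TASK_SIZE_MODEL and the sample addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE_MODEL 0x0000700000000000ULL

static void print_ptr_model(const void *p, int approved)
{
	if ((uint64_t)(uintptr_t)p > TASK_SIZE_MODEL && !approved) {
		printf("(nil)\n");	/* refuse to leak a kernel address */
		return;
	}
	printf("%p\n", p);
}

int main(void)
{
	print_ptr_model((void *)(uintptr_t)0xffffffff81000000ULL, 0); /* hidden */
	print_ptr_model((void *)(uintptr_t)0x00007f0012340000ULL, 0); /* shown */
	return 0;
}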
95449diff --git a/localversion-grsec b/localversion-grsec
95450new file mode 100644
95451index 0000000..7cd6065
95452--- /dev/null
95453+++ b/localversion-grsec
95454@@ -0,0 +1 @@
95455+-grsec
95456diff --git a/mm/Kconfig b/mm/Kconfig
95457index 1d1ae6b..0f05885 100644
95458--- a/mm/Kconfig
95459+++ b/mm/Kconfig
95460@@ -341,10 +341,11 @@ config KSM
95461 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95462
95463 config DEFAULT_MMAP_MIN_ADDR
95464- int "Low address space to protect from user allocation"
95465+ int "Low address space to protect from user allocation"
95466 depends on MMU
95467- default 4096
95468- help
95469+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95470+ default 65536
95471+ help
95472 This is the portion of low virtual memory which should be protected
95473 from userspace allocation. Keeping a user from writing to low pages
95474 can help reduce the impact of kernel NULL pointer bugs.
95475@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95476
95477 config HWPOISON_INJECT
95478 tristate "HWPoison pages injector"
95479- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95480+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95481 select PROC_PAGE_MONITOR
95482
95483 config NOMMU_INITIAL_TRIM_EXCESS
95484diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
95485index 957d3da..1d34e20 100644
95486--- a/mm/Kconfig.debug
95487+++ b/mm/Kconfig.debug
95488@@ -10,6 +10,7 @@ config PAGE_EXTENSION
95489 config DEBUG_PAGEALLOC
95490 bool "Debug page memory allocations"
95491 depends on DEBUG_KERNEL
95492+ depends on !PAX_MEMORY_SANITIZE
95493 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
95494 depends on !KMEMCHECK
95495 select PAGE_EXTENSION
95496diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95497index 0ae0df5..82ac56b 100644
95498--- a/mm/backing-dev.c
95499+++ b/mm/backing-dev.c
95500@@ -12,7 +12,7 @@
95501 #include <linux/device.h>
95502 #include <trace/events/writeback.h>
95503
95504-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95505+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95506
95507 struct backing_dev_info default_backing_dev_info = {
95508 .name = "default",
95509@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95510 return err;
95511
95512 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95513- atomic_long_inc_return(&bdi_seq));
95514+ atomic_long_inc_return_unchecked(&bdi_seq));
95515 if (err) {
95516 bdi_destroy(bdi);
95517 return err;
95518diff --git a/mm/filemap.c b/mm/filemap.c
95519index 673e458..7192013 100644
95520--- a/mm/filemap.c
95521+++ b/mm/filemap.c
95522@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95523 struct address_space *mapping = file->f_mapping;
95524
95525 if (!mapping->a_ops->readpage)
95526- return -ENOEXEC;
95527+ return -ENODEV;
95528 file_accessed(file);
95529 vma->vm_ops = &generic_file_vm_ops;
95530 return 0;
95531@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95532 *pos = i_size_read(inode);
95533
95534 if (limit != RLIM_INFINITY) {
95535+		gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
95536 if (*pos >= limit) {
95537 send_sig(SIGXFSZ, current, 0);
95538 return -EFBIG;
95539diff --git a/mm/fremap.c b/mm/fremap.c
95540index 2805d71..8b56e7d 100644
95541--- a/mm/fremap.c
95542+++ b/mm/fremap.c
95543@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95544 retry:
95545 vma = find_vma(mm, start);
95546
95547+#ifdef CONFIG_PAX_SEGMEXEC
95548+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95549+ goto out;
95550+#endif
95551+
95552 /*
95553 * Make sure the vma is shared, that it supports prefaulting,
95554 * and that the remapped range is valid and fully within
95555diff --git a/mm/gup.c b/mm/gup.c
95556index 9b2afbf..647297c 100644
95557--- a/mm/gup.c
95558+++ b/mm/gup.c
95559@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95560 unsigned int fault_flags = 0;
95561 int ret;
95562
95563- /* For mlock, just skip the stack guard page. */
95564- if ((*flags & FOLL_MLOCK) &&
95565- (stack_guard_page_start(vma, address) ||
95566- stack_guard_page_end(vma, address + PAGE_SIZE)))
95567- return -ENOENT;
95568 if (*flags & FOLL_WRITE)
95569 fault_flags |= FAULT_FLAG_WRITE;
95570 if (nonblocking)
95571@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95572 if (!(gup_flags & FOLL_FORCE))
95573 gup_flags |= FOLL_NUMA;
95574
95575- do {
95576+ while (nr_pages) {
95577 struct page *page;
95578 unsigned int foll_flags = gup_flags;
95579 unsigned int page_increm;
95580
95581 /* first iteration or cross vma bound */
95582 if (!vma || start >= vma->vm_end) {
95583- vma = find_extend_vma(mm, start);
95584+ vma = find_vma(mm, start);
95585 if (!vma && in_gate_area(mm, start)) {
95586 int ret;
95587 ret = get_gate_page(mm, start & PAGE_MASK,
95588@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95589 goto next_page;
95590 }
95591
95592- if (!vma || check_vma_flags(vma, gup_flags))
95593+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95594 return i ? : -EFAULT;
95595 if (is_vm_hugetlb_page(vma)) {
95596 i = follow_hugetlb_page(mm, vma, pages, vmas,
95597@@ -518,7 +513,7 @@ next_page:
95598 i += page_increm;
95599 start += page_increm * PAGE_SIZE;
95600 nr_pages -= page_increm;
95601- } while (nr_pages);
95602+ }
95603 return i;
95604 }
95605 EXPORT_SYMBOL(__get_user_pages);
95606diff --git a/mm/highmem.c b/mm/highmem.c
95607index 123bcd3..0de52ba 100644
95608--- a/mm/highmem.c
95609+++ b/mm/highmem.c
95610@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95611 * So no dangers, even with speculative execution.
95612 */
95613 page = pte_page(pkmap_page_table[i]);
95614+ pax_open_kernel();
95615 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95616-
95617+ pax_close_kernel();
95618 set_page_address(page, NULL);
95619 need_flush = 1;
95620 }
95621@@ -259,9 +260,11 @@ start:
95622 }
95623 }
95624 vaddr = PKMAP_ADDR(last_pkmap_nr);
95625+
95626+ pax_open_kernel();
95627 set_pte_at(&init_mm, vaddr,
95628 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95629-
95630+ pax_close_kernel();
95631 pkmap_count[last_pkmap_nr] = 1;
95632 set_page_address(page, (void *)vaddr);
95633
95634diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95635index 267e419..394bed9 100644
95636--- a/mm/hugetlb.c
95637+++ b/mm/hugetlb.c
95638@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95639 struct ctl_table *table, int write,
95640 void __user *buffer, size_t *length, loff_t *ppos)
95641 {
95642+ ctl_table_no_const t;
95643 struct hstate *h = &default_hstate;
95644 unsigned long tmp = h->max_huge_pages;
95645 int ret;
95646@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95647 if (!hugepages_supported())
95648 return -ENOTSUPP;
95649
95650- table->data = &tmp;
95651- table->maxlen = sizeof(unsigned long);
95652- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95653+ t = *table;
95654+ t.data = &tmp;
95655+ t.maxlen = sizeof(unsigned long);
95656+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95657 if (ret)
95658 goto out;
95659
95660@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95661 struct hstate *h = &default_hstate;
95662 unsigned long tmp;
95663 int ret;
95664+ ctl_table_no_const hugetlb_table;
95665
95666 if (!hugepages_supported())
95667 return -ENOTSUPP;
95668@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95669 if (write && hstate_is_gigantic(h))
95670 return -EINVAL;
95671
95672- table->data = &tmp;
95673- table->maxlen = sizeof(unsigned long);
95674- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95675+ hugetlb_table = *table;
95676+ hugetlb_table.data = &tmp;
95677+ hugetlb_table.maxlen = sizeof(unsigned long);
95678+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95679 if (ret)
95680 goto out;
95681
95682@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95683 i_mmap_unlock_write(mapping);
95684 }
95685
95686+#ifdef CONFIG_PAX_SEGMEXEC
95687+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95688+{
95689+ struct mm_struct *mm = vma->vm_mm;
95690+ struct vm_area_struct *vma_m;
95691+ unsigned long address_m;
95692+ pte_t *ptep_m;
95693+
95694+ vma_m = pax_find_mirror_vma(vma);
95695+ if (!vma_m)
95696+ return;
95697+
95698+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95699+ address_m = address + SEGMEXEC_TASK_SIZE;
95700+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95701+ get_page(page_m);
95702+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95703+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95704+}
95705+#endif
95706+
95707 /*
95708 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95709 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95710@@ -2910,6 +2935,11 @@ retry_avoidcopy:
95711 make_huge_pte(vma, new_page, 1));
95712 page_remove_rmap(old_page);
95713 hugepage_add_new_anon_rmap(new_page, vma, address);
95714+
95715+#ifdef CONFIG_PAX_SEGMEXEC
95716+ pax_mirror_huge_pte(vma, address, new_page);
95717+#endif
95718+
95719 /* Make the old page be freed below */
95720 new_page = old_page;
95721 }
95722@@ -3070,6 +3100,10 @@ retry:
95723 && (vma->vm_flags & VM_SHARED)));
95724 set_huge_pte_at(mm, address, ptep, new_pte);
95725
95726+#ifdef CONFIG_PAX_SEGMEXEC
95727+ pax_mirror_huge_pte(vma, address, page);
95728+#endif
95729+
95730 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95731 /* Optimization, do the COW without a second fault */
95732 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95733@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95734 struct address_space *mapping;
95735 int need_wait_lock = 0;
95736
95737+#ifdef CONFIG_PAX_SEGMEXEC
95738+ struct vm_area_struct *vma_m;
95739+#endif
95740+
95741 address &= huge_page_mask(h);
95742
95743 ptep = huge_pte_offset(mm, address);
95744@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95745 VM_FAULT_SET_HINDEX(hstate_index(h));
95746 }
95747
95748+#ifdef CONFIG_PAX_SEGMEXEC
95749+ vma_m = pax_find_mirror_vma(vma);
95750+ if (vma_m) {
95751+ unsigned long address_m;
95752+
95753+ if (vma->vm_start > vma_m->vm_start) {
95754+ address_m = address;
95755+ address -= SEGMEXEC_TASK_SIZE;
95756+ vma = vma_m;
95757+ h = hstate_vma(vma);
95758+ } else
95759+ address_m = address + SEGMEXEC_TASK_SIZE;
95760+
95761+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95762+ return VM_FAULT_OOM;
95763+ address_m &= HPAGE_MASK;
95764+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95765+ }
95766+#endif
95767+
95768 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95769 if (!ptep)
95770 return VM_FAULT_OOM;
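
The hugetlb sysctl handlers now copy the table into a ctl_table_no_const local before repointing .data and .maxlen at stack temporaries, which lets the registered table itself stay const. The copy-then-mutate shape, modeled in userspace:

#include <stdio.h>

struct ctl_table_model { const char *name; void *data; int maxlen; };

static const struct ctl_table_model global_table = { "nr_hugepages", 0, 0 };

static int handler_model(const struct ctl_table_model *table)
{
	unsigned long tmp = 42;
	struct ctl_table_model t = *table;	/* t = *table in the hunk */

	t.data = &tmp;				/* point at the local */
	t.maxlen = sizeof(tmp);
	printf("%s via copy, maxlen=%d\n", t.name, t.maxlen);
	return 0;				/* global_table never written */
}

int main(void)
{
	return handler_model(&global_table);
}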
95771diff --git a/mm/internal.h b/mm/internal.h
95772index efad241..57ae4ca 100644
95773--- a/mm/internal.h
95774+++ b/mm/internal.h
95775@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95776
95777 extern int __isolate_free_page(struct page *page, unsigned int order);
95778 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95779+extern void free_compound_page(struct page *page);
95780 extern void prep_compound_page(struct page *page, unsigned long order);
95781 #ifdef CONFIG_MEMORY_FAILURE
95782 extern bool is_free_buddy_page(struct page *page);
95783@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95784
95785 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95786 unsigned long, unsigned long,
95787- unsigned long, unsigned long);
95788+ unsigned long, unsigned long) __intentional_overflow(-1);
95789
95790 extern void set_pageblock_order(void);
95791 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95792diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95793index 3cda50c..032ba634 100644
95794--- a/mm/kmemleak.c
95795+++ b/mm/kmemleak.c
95796@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95797
95798 for (i = 0; i < object->trace_len; i++) {
95799 void *ptr = (void *)object->trace[i];
95800- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95801+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95802 }
95803 }
95804
95805@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95806 return -ENOMEM;
95807 }
95808
95809- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95810+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95811 &kmemleak_fops);
95812 if (!dentry)
95813 pr_warning("Failed to create the debugfs kmemleak file\n");
95814diff --git a/mm/maccess.c b/mm/maccess.c
95815index d53adf9..03a24bf 100644
95816--- a/mm/maccess.c
95817+++ b/mm/maccess.c
95818@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95819 set_fs(KERNEL_DS);
95820 pagefault_disable();
95821 ret = __copy_from_user_inatomic(dst,
95822- (__force const void __user *)src, size);
95823+ (const void __force_user *)src, size);
95824 pagefault_enable();
95825 set_fs(old_fs);
95826
95827@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95828
95829 set_fs(KERNEL_DS);
95830 pagefault_disable();
95831- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95832+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95833 pagefault_enable();
95834 set_fs(old_fs);
95835
95836diff --git a/mm/madvise.c b/mm/madvise.c
95837index a271adc..831d82f 100644
95838--- a/mm/madvise.c
95839+++ b/mm/madvise.c
95840@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95841 pgoff_t pgoff;
95842 unsigned long new_flags = vma->vm_flags;
95843
95844+#ifdef CONFIG_PAX_SEGMEXEC
95845+ struct vm_area_struct *vma_m;
95846+#endif
95847+
95848 switch (behavior) {
95849 case MADV_NORMAL:
95850 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95851@@ -126,6 +130,13 @@ success:
95852 /*
95853 * vm_flags is protected by the mmap_sem held in write mode.
95854 */
95855+
95856+#ifdef CONFIG_PAX_SEGMEXEC
95857+ vma_m = pax_find_mirror_vma(vma);
95858+ if (vma_m)
95859+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95860+#endif
95861+
95862 vma->vm_flags = new_flags;
95863
95864 out:
95865@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95866 struct vm_area_struct **prev,
95867 unsigned long start, unsigned long end)
95868 {
95869+
95870+#ifdef CONFIG_PAX_SEGMEXEC
95871+ struct vm_area_struct *vma_m;
95872+#endif
95873+
95874 *prev = vma;
95875 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95876 return -EINVAL;
95877@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95878 zap_page_range(vma, start, end - start, &details);
95879 } else
95880 zap_page_range(vma, start, end - start, NULL);
95881+
95882+#ifdef CONFIG_PAX_SEGMEXEC
95883+ vma_m = pax_find_mirror_vma(vma);
95884+ if (vma_m) {
95885+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95886+ struct zap_details details = {
95887+ .nonlinear_vma = vma_m,
95888+ .last_index = ULONG_MAX,
95889+ };
95890+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95891+ } else
95892+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95893+ }
95894+#endif
95895+
95896 return 0;
95897 }
95898
95899@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95900 if (end < start)
95901 return error;
95902
95903+#ifdef CONFIG_PAX_SEGMEXEC
95904+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95905+ if (end > SEGMEXEC_TASK_SIZE)
95906+ return error;
95907+ } else
95908+#endif
95909+
95910+ if (end > TASK_SIZE)
95911+ return error;
95912+
95913 error = 0;
95914 if (end == start)
95915 return error;
95916diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95917index 20c29dd..22bd8e2 100644
95918--- a/mm/memory-failure.c
95919+++ b/mm/memory-failure.c
95920@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95921
95922 int sysctl_memory_failure_recovery __read_mostly = 1;
95923
95924-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95925+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95926
95927 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95928
95929@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95930 pfn, t->comm, t->pid);
95931 si.si_signo = SIGBUS;
95932 si.si_errno = 0;
95933- si.si_addr = (void *)addr;
95934+ si.si_addr = (void __user *)addr;
95935 #ifdef __ARCH_SI_TRAPNO
95936 si.si_trapno = trapno;
95937 #endif
95938@@ -786,7 +786,7 @@ static struct page_state {
95939 unsigned long res;
95940 char *msg;
95941 int (*action)(struct page *p, unsigned long pfn);
95942-} error_states[] = {
95943+} __do_const error_states[] = {
95944 { reserved, reserved, "reserved kernel", me_kernel },
95945 /*
95946 * free pages are specially detected outside this table:
95947@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95948 nr_pages = 1 << compound_order(hpage);
95949 else /* normal page or thp */
95950 nr_pages = 1;
95951- atomic_long_add(nr_pages, &num_poisoned_pages);
95952+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95953
95954 /*
95955 * We need/can do nothing about count=0 pages.
95956@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95957 if (PageHWPoison(hpage)) {
95958 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95959 || (p != hpage && TestSetPageHWPoison(hpage))) {
95960- atomic_long_sub(nr_pages, &num_poisoned_pages);
95961+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95962 unlock_page(hpage);
95963 return 0;
95964 }
95965@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95966 */
95967 if (!PageHWPoison(p)) {
95968 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95969- atomic_long_sub(nr_pages, &num_poisoned_pages);
95970+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95971 put_page(hpage);
95972 res = 0;
95973 goto out;
95974 }
95975 if (hwpoison_filter(p)) {
95976 if (TestClearPageHWPoison(p))
95977- atomic_long_sub(nr_pages, &num_poisoned_pages);
95978+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95979 unlock_page(hpage);
95980 put_page(hpage);
95981 return 0;
95982@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
95983 return 0;
95984 }
95985 if (TestClearPageHWPoison(p))
95986- atomic_long_dec(&num_poisoned_pages);
95987+ atomic_long_dec_unchecked(&num_poisoned_pages);
95988 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95989 return 0;
95990 }
95991@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
95992 */
95993 if (TestClearPageHWPoison(page)) {
95994 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95995- atomic_long_sub(nr_pages, &num_poisoned_pages);
95996+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95997 freeit = 1;
95998 if (PageHuge(page))
95999 clear_page_hwpoison_huge_page(page);
96000@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96001 if (PageHuge(page)) {
96002 set_page_hwpoison_huge_page(hpage);
96003 dequeue_hwpoisoned_huge_page(hpage);
96004- atomic_long_add(1 << compound_order(hpage),
96005+ atomic_long_add_unchecked(1 << compound_order(hpage),
96006 &num_poisoned_pages);
96007 } else {
96008 SetPageHWPoison(page);
96009- atomic_long_inc(&num_poisoned_pages);
96010+ atomic_long_inc_unchecked(&num_poisoned_pages);
96011 }
96012 }
96013 return ret;
96014@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
96015 put_page(page);
96016 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96017 SetPageHWPoison(page);
96018- atomic_long_inc(&num_poisoned_pages);
96019+ atomic_long_inc_unchecked(&num_poisoned_pages);
96020 return 0;
96021 }
96022
96023@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
96024 if (!is_free_buddy_page(page))
96025 pr_info("soft offline: %#lx: page leaked\n",
96026 pfn);
96027- atomic_long_inc(&num_poisoned_pages);
96028+ atomic_long_inc_unchecked(&num_poisoned_pages);
96029 }
96030 } else {
96031 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96032@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
96033 if (PageHuge(page)) {
96034 set_page_hwpoison_huge_page(hpage);
96035 dequeue_hwpoisoned_huge_page(hpage);
96036- atomic_long_add(1 << compound_order(hpage),
96037+ atomic_long_add_unchecked(1 << compound_order(hpage),
96038 &num_poisoned_pages);
96039 } else {
96040 SetPageHWPoison(page);
96041- atomic_long_inc(&num_poisoned_pages);
96042+ atomic_long_inc_unchecked(&num_poisoned_pages);
96043 }
96044 }
96045 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
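
Throughout mm/memory-failure.c the patch retypes num_poisoned_pages as atomic_long_unchecked_t: under PaX's overflow-checked atomics a pure statistic like this may wrap harmlessly, so it is opted out of the checking, and the `__do_const` on error_states[] moves that handler table out of writable memory. A userspace model of the checked/unchecked distinction (an assumption about the semantics, using GCC's overflow builtins):

#include <limits.h>
#include <stdio.h>

/* assumed semantics: checked atomics trap on signed overflow,
 * *_unchecked ones deliberately do not */
static long checked_add(long *v, long i)
{
    long r;
    if (__builtin_saddl_overflow(*v, i, &r)) {
        fprintf(stderr, "refcount overflow caught\n");
        return *v;              /* the real thing would trap */
    }
    return *v = r;
}

static long unchecked_add(long *v, long i)
{
    *v = (long)((unsigned long)*v + (unsigned long)i);  /* wraps */
    return *v;
}

int main(void)
{
    long a = LONG_MAX, b = LONG_MAX;
    checked_add(&a, 1);         /* caught */
    unchecked_add(&b, 1);       /* statistics counter: wrap is harmless */
    printf("a=%ld b=%ld\n", a, b);
    return 0;
}
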
96046diff --git a/mm/memory.c b/mm/memory.c
96047index 6aa7822..3c76005 100644
96048--- a/mm/memory.c
96049+++ b/mm/memory.c
96050@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96051 free_pte_range(tlb, pmd, addr);
96052 } while (pmd++, addr = next, addr != end);
96053
96054+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96055 start &= PUD_MASK;
96056 if (start < floor)
96057 return;
96058@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96059 pmd = pmd_offset(pud, start);
96060 pud_clear(pud);
96061 pmd_free_tlb(tlb, pmd, start);
96062+#endif
96063+
96064 }
96065
96066 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96067@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96068 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96069 } while (pud++, addr = next, addr != end);
96070
96071+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96072 start &= PGDIR_MASK;
96073 if (start < floor)
96074 return;
96075@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96076 pud = pud_offset(pgd, start);
96077 pgd_clear(pgd);
96078 pud_free_tlb(tlb, pud, start);
96079+#endif
96080+
96081 }
96082
96083 /*
96084@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96085 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96086 */
96087 if (vma->vm_ops)
96088- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96089+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96090 vma->vm_ops->fault);
96091 if (vma->vm_file)
96092- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96093+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96094 vma->vm_file->f_op->mmap);
96095 dump_stack();
96096 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96097@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96098 page_add_file_rmap(page);
96099 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96100
96101+#ifdef CONFIG_PAX_SEGMEXEC
96102+ pax_mirror_file_pte(vma, addr, page, ptl);
96103+#endif
96104+
96105 retval = 0;
96106 pte_unmap_unlock(pte, ptl);
96107 return retval;
96108@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96109 if (!page_count(page))
96110 return -EINVAL;
96111 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96112+
96113+#ifdef CONFIG_PAX_SEGMEXEC
96114+ struct vm_area_struct *vma_m;
96115+#endif
96116+
96117 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96118 BUG_ON(vma->vm_flags & VM_PFNMAP);
96119 vma->vm_flags |= VM_MIXEDMAP;
96120+
96121+#ifdef CONFIG_PAX_SEGMEXEC
96122+ vma_m = pax_find_mirror_vma(vma);
96123+ if (vma_m)
96124+ vma_m->vm_flags |= VM_MIXEDMAP;
96125+#endif
96126+
96127 }
96128 return insert_page(vma, addr, page, vma->vm_page_prot);
96129 }
96130@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96131 unsigned long pfn)
96132 {
96133 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96134+ BUG_ON(vma->vm_mirror);
96135
96136 if (addr < vma->vm_start || addr >= vma->vm_end)
96137 return -EFAULT;
96138@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96139
96140 BUG_ON(pud_huge(*pud));
96141
96142- pmd = pmd_alloc(mm, pud, addr);
96143+ pmd = (mm == &init_mm) ?
96144+ pmd_alloc_kernel(mm, pud, addr) :
96145+ pmd_alloc(mm, pud, addr);
96146 if (!pmd)
96147 return -ENOMEM;
96148 do {
96149@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96150 unsigned long next;
96151 int err;
96152
96153- pud = pud_alloc(mm, pgd, addr);
96154+ pud = (mm == &init_mm) ?
96155+ pud_alloc_kernel(mm, pgd, addr) :
96156+ pud_alloc(mm, pgd, addr);
96157 if (!pud)
96158 return -ENOMEM;
96159 do {
96160@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96161 return ret;
96162 }
96163
96164+#ifdef CONFIG_PAX_SEGMEXEC
96165+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96166+{
96167+ struct mm_struct *mm = vma->vm_mm;
96168+ spinlock_t *ptl;
96169+ pte_t *pte, entry;
96170+
96171+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96172+ entry = *pte;
96173+ if (!pte_present(entry)) {
96174+ if (!pte_none(entry)) {
96175+ BUG_ON(pte_file(entry));
96176+ free_swap_and_cache(pte_to_swp_entry(entry));
96177+ pte_clear_not_present_full(mm, address, pte, 0);
96178+ }
96179+ } else {
96180+ struct page *page;
96181+
96182+ flush_cache_page(vma, address, pte_pfn(entry));
96183+ entry = ptep_clear_flush(vma, address, pte);
96184+ BUG_ON(pte_dirty(entry));
96185+ page = vm_normal_page(vma, address, entry);
96186+ if (page) {
96187+ update_hiwater_rss(mm);
96188+ if (PageAnon(page))
96189+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96190+ else
96191+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96192+ page_remove_rmap(page);
96193+ page_cache_release(page);
96194+ }
96195+ }
96196+ pte_unmap_unlock(pte, ptl);
96197+}
96198+
96199+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96200+ *
96201+ * the ptl of the lower mapped page is held on entry and is not released on exit
96202+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96203+ */
96204+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96205+{
96206+ struct mm_struct *mm = vma->vm_mm;
96207+ unsigned long address_m;
96208+ spinlock_t *ptl_m;
96209+ struct vm_area_struct *vma_m;
96210+ pmd_t *pmd_m;
96211+ pte_t *pte_m, entry_m;
96212+
96213+ BUG_ON(!page_m || !PageAnon(page_m));
96214+
96215+ vma_m = pax_find_mirror_vma(vma);
96216+ if (!vma_m)
96217+ return;
96218+
96219+ BUG_ON(!PageLocked(page_m));
96220+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96221+ address_m = address + SEGMEXEC_TASK_SIZE;
96222+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96223+ pte_m = pte_offset_map(pmd_m, address_m);
96224+ ptl_m = pte_lockptr(mm, pmd_m);
96225+ if (ptl != ptl_m) {
96226+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96227+ if (!pte_none(*pte_m))
96228+ goto out;
96229+ }
96230+
96231+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96232+ page_cache_get(page_m);
96233+ page_add_anon_rmap(page_m, vma_m, address_m);
96234+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96235+ set_pte_at(mm, address_m, pte_m, entry_m);
96236+ update_mmu_cache(vma_m, address_m, pte_m);
96237+out:
96238+ if (ptl != ptl_m)
96239+ spin_unlock(ptl_m);
96240+ pte_unmap(pte_m);
96241+ unlock_page(page_m);
96242+}
96243+
96244+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96245+{
96246+ struct mm_struct *mm = vma->vm_mm;
96247+ unsigned long address_m;
96248+ spinlock_t *ptl_m;
96249+ struct vm_area_struct *vma_m;
96250+ pmd_t *pmd_m;
96251+ pte_t *pte_m, entry_m;
96252+
96253+ BUG_ON(!page_m || PageAnon(page_m));
96254+
96255+ vma_m = pax_find_mirror_vma(vma);
96256+ if (!vma_m)
96257+ return;
96258+
96259+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96260+ address_m = address + SEGMEXEC_TASK_SIZE;
96261+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96262+ pte_m = pte_offset_map(pmd_m, address_m);
96263+ ptl_m = pte_lockptr(mm, pmd_m);
96264+ if (ptl != ptl_m) {
96265+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96266+ if (!pte_none(*pte_m))
96267+ goto out;
96268+ }
96269+
96270+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96271+ page_cache_get(page_m);
96272+ page_add_file_rmap(page_m);
96273+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96274+ set_pte_at(mm, address_m, pte_m, entry_m);
96275+ update_mmu_cache(vma_m, address_m, pte_m);
96276+out:
96277+ if (ptl != ptl_m)
96278+ spin_unlock(ptl_m);
96279+ pte_unmap(pte_m);
96280+}
96281+
96282+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96283+{
96284+ struct mm_struct *mm = vma->vm_mm;
96285+ unsigned long address_m;
96286+ spinlock_t *ptl_m;
96287+ struct vm_area_struct *vma_m;
96288+ pmd_t *pmd_m;
96289+ pte_t *pte_m, entry_m;
96290+
96291+ vma_m = pax_find_mirror_vma(vma);
96292+ if (!vma_m)
96293+ return;
96294+
96295+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96296+ address_m = address + SEGMEXEC_TASK_SIZE;
96297+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96298+ pte_m = pte_offset_map(pmd_m, address_m);
96299+ ptl_m = pte_lockptr(mm, pmd_m);
96300+ if (ptl != ptl_m) {
96301+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96302+ if (!pte_none(*pte_m))
96303+ goto out;
96304+ }
96305+
96306+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96307+ set_pte_at(mm, address_m, pte_m, entry_m);
96308+out:
96309+ if (ptl != ptl_m)
96310+ spin_unlock(ptl_m);
96311+ pte_unmap(pte_m);
96312+}
96313+
96314+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96315+{
96316+ struct page *page_m;
96317+ pte_t entry;
96318+
96319+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96320+ goto out;
96321+
96322+ entry = *pte;
96323+ page_m = vm_normal_page(vma, address, entry);
96324+ if (!page_m)
96325+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96326+ else if (PageAnon(page_m)) {
96327+ if (pax_find_mirror_vma(vma)) {
96328+ pte_unmap_unlock(pte, ptl);
96329+ lock_page(page_m);
96330+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96331+ if (pte_same(entry, *pte))
96332+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96333+ else
96334+ unlock_page(page_m);
96335+ }
96336+ } else
96337+ pax_mirror_file_pte(vma, address, page_m, ptl);
96338+
96339+out:
96340+ pte_unmap_unlock(pte, ptl);
96341+}
96342+#endif
96343+
96344 /*
96345 * This routine handles present pages, when users try to write
96346 * to a shared page. It is done by copying the page to a new address
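
The pax_mirror_*_pte() helpers added above synchronize the mirror's PTE while the lower PTE's lock (ptl) is already held; the mirror's lock is taken nested (hence SINGLE_DEPTH_NESTING) only when it is a different lock, and an already-populated mirror PTE is left alone. The locking shape, modelled with pthreads:

#include <pthread.h>
#include <stdio.h>

/* ptl guards the lower PTE, ptl_m the mirror's; they can be the same
 * lock when both PTEs live in one page-table page */
static void sync_mirror(pthread_mutex_t *ptl, pthread_mutex_t *ptl_m)
{
    pthread_mutex_lock(ptl);           /* held by the caller in the kernel */
    if (ptl != ptl_m)
        pthread_mutex_lock(ptl_m);     /* nested, never the reverse order */
    /* ... write the mirror PTE here, skipping if already populated ... */
    if (ptl != ptl_m)
        pthread_mutex_unlock(ptl_m);
    pthread_mutex_unlock(ptl);         /* the kernel leaves this to the caller */
}

int main(void)
{
    pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
    sync_mirror(&a, &b);   /* distinct locks: inner one taken */
    sync_mirror(&a, &a);   /* same lock: inner acquisition skipped */
    puts("ok");
    return 0;
}
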
96347@@ -2212,6 +2419,12 @@ gotten:
96348 */
96349 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96350 if (likely(pte_same(*page_table, orig_pte))) {
96351+
96352+#ifdef CONFIG_PAX_SEGMEXEC
96353+ if (pax_find_mirror_vma(vma))
96354+ BUG_ON(!trylock_page(new_page));
96355+#endif
96356+
96357 if (old_page) {
96358 if (!PageAnon(old_page)) {
96359 dec_mm_counter_fast(mm, MM_FILEPAGES);
96360@@ -2265,6 +2478,10 @@ gotten:
96361 page_remove_rmap(old_page);
96362 }
96363
96364+#ifdef CONFIG_PAX_SEGMEXEC
96365+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96366+#endif
96367+
96368 /* Free the old page.. */
96369 new_page = old_page;
96370 ret |= VM_FAULT_WRITE;
96371@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96372 swap_free(entry);
96373 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96374 try_to_free_swap(page);
96375+
96376+#ifdef CONFIG_PAX_SEGMEXEC
96377+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96378+#endif
96379+
96380 unlock_page(page);
96381 if (page != swapcache) {
96382 /*
96383@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96384
96385 /* No need to invalidate - it was non-present before */
96386 update_mmu_cache(vma, address, page_table);
96387+
96388+#ifdef CONFIG_PAX_SEGMEXEC
96389+ pax_mirror_anon_pte(vma, address, page, ptl);
96390+#endif
96391+
96392 unlock:
96393 pte_unmap_unlock(page_table, ptl);
96394 out:
96395@@ -2581,40 +2808,6 @@ out_release:
96396 }
96397
96398 /*
96399- * This is like a special single-page "expand_{down|up}wards()",
96400- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96401- * doesn't hit another vma.
96402- */
96403-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96404-{
96405- address &= PAGE_MASK;
96406- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96407- struct vm_area_struct *prev = vma->vm_prev;
96408-
96409- /*
96410- * Is there a mapping abutting this one below?
96411- *
96412- * That's only ok if it's the same stack mapping
96413- * that has gotten split..
96414- */
96415- if (prev && prev->vm_end == address)
96416- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96417-
96418- return expand_downwards(vma, address - PAGE_SIZE);
96419- }
96420- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96421- struct vm_area_struct *next = vma->vm_next;
96422-
96423- /* As VM_GROWSDOWN but s/below/above/ */
96424- if (next && next->vm_start == address + PAGE_SIZE)
96425- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96426-
96427- return expand_upwards(vma, address + PAGE_SIZE);
96428- }
96429- return 0;
96430-}
96431-
96432-/*
96433 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96434 * but allow concurrent faults), and pte mapped but not yet locked.
96435 * We return with mmap_sem still held, but pte unmapped and unlocked.
96436@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96437 unsigned int flags)
96438 {
96439 struct mem_cgroup *memcg;
96440- struct page *page;
96441+ struct page *page = NULL;
96442 spinlock_t *ptl;
96443 pte_t entry;
96444
96445- pte_unmap(page_table);
96446-
96447- /* Check if we need to add a guard page to the stack */
96448- if (check_stack_guard_page(vma, address) < 0)
96449- return VM_FAULT_SIGSEGV;
96450-
96451- /* Use the zero-page for reads */
96452 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96453 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96454 vma->vm_page_prot));
96455- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96456+ ptl = pte_lockptr(mm, pmd);
96457+ spin_lock(ptl);
96458 if (!pte_none(*page_table))
96459 goto unlock;
96460 goto setpte;
96461 }
96462
96463 /* Allocate our own private page. */
96464+ pte_unmap(page_table);
96465+
96466 if (unlikely(anon_vma_prepare(vma)))
96467 goto oom;
96468 page = alloc_zeroed_user_highpage_movable(vma, address);
96469@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96470 if (!pte_none(*page_table))
96471 goto release;
96472
96473+#ifdef CONFIG_PAX_SEGMEXEC
96474+ if (pax_find_mirror_vma(vma))
96475+ BUG_ON(!trylock_page(page));
96476+#endif
96477+
96478 inc_mm_counter_fast(mm, MM_ANONPAGES);
96479 page_add_new_anon_rmap(page, vma, address);
96480 mem_cgroup_commit_charge(page, memcg, false);
96481@@ -2677,6 +2871,12 @@ setpte:
96482
96483 /* No need to invalidate - it was non-present before */
96484 update_mmu_cache(vma, address, page_table);
96485+
96486+#ifdef CONFIG_PAX_SEGMEXEC
96487+ if (page)
96488+ pax_mirror_anon_pte(vma, address, page, ptl);
96489+#endif
96490+
96491 unlock:
96492 pte_unmap_unlock(page_table, ptl);
96493 return 0;
96494@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96495 return ret;
96496 }
96497 do_set_pte(vma, address, fault_page, pte, false, false);
96498+
96499+#ifdef CONFIG_PAX_SEGMEXEC
96500+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96501+#endif
96502+
96503 unlock_page(fault_page);
96504 unlock_out:
96505 pte_unmap_unlock(pte, ptl);
96506@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96507 page_cache_release(fault_page);
96508 goto uncharge_out;
96509 }
96510+
96511+#ifdef CONFIG_PAX_SEGMEXEC
96512+ if (pax_find_mirror_vma(vma))
96513+ BUG_ON(!trylock_page(new_page));
96514+#endif
96515+
96516 do_set_pte(vma, address, new_page, pte, true, true);
96517+
96518+#ifdef CONFIG_PAX_SEGMEXEC
96519+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96520+#endif
96521+
96522 mem_cgroup_commit_charge(new_page, memcg, false);
96523 lru_cache_add_active_or_unevictable(new_page, vma);
96524 pte_unmap_unlock(pte, ptl);
96525@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96526 return ret;
96527 }
96528 do_set_pte(vma, address, fault_page, pte, true, false);
96529+
96530+#ifdef CONFIG_PAX_SEGMEXEC
96531+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96532+#endif
96533+
96534 pte_unmap_unlock(pte, ptl);
96535
96536 if (set_page_dirty(fault_page))
96537@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96538 if (flags & FAULT_FLAG_WRITE)
96539 flush_tlb_fix_spurious_fault(vma, address);
96540 }
96541+
96542+#ifdef CONFIG_PAX_SEGMEXEC
96543+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96544+ return 0;
96545+#endif
96546+
96547 unlock:
96548 pte_unmap_unlock(pte, ptl);
96549 return 0;
96550@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96551 pmd_t *pmd;
96552 pte_t *pte;
96553
96554+#ifdef CONFIG_PAX_SEGMEXEC
96555+ struct vm_area_struct *vma_m;
96556+#endif
96557+
96558 if (unlikely(is_vm_hugetlb_page(vma)))
96559 return hugetlb_fault(mm, vma, address, flags);
96560
96561+#ifdef CONFIG_PAX_SEGMEXEC
96562+ vma_m = pax_find_mirror_vma(vma);
96563+ if (vma_m) {
96564+ unsigned long address_m;
96565+ pgd_t *pgd_m;
96566+ pud_t *pud_m;
96567+ pmd_t *pmd_m;
96568+
96569+ if (vma->vm_start > vma_m->vm_start) {
96570+ address_m = address;
96571+ address -= SEGMEXEC_TASK_SIZE;
96572+ vma = vma_m;
96573+ } else
96574+ address_m = address + SEGMEXEC_TASK_SIZE;
96575+
96576+ pgd_m = pgd_offset(mm, address_m);
96577+ pud_m = pud_alloc(mm, pgd_m, address_m);
96578+ if (!pud_m)
96579+ return VM_FAULT_OOM;
96580+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96581+ if (!pmd_m)
96582+ return VM_FAULT_OOM;
96583+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96584+ return VM_FAULT_OOM;
96585+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96586+ }
96587+#endif
96588+
96589 pgd = pgd_offset(mm, address);
96590 pud = pud_alloc(mm, pgd, address);
96591 if (!pud)
96592@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96593 spin_unlock(&mm->page_table_lock);
96594 return 0;
96595 }
96596+
96597+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96598+{
96599+ pud_t *new = pud_alloc_one(mm, address);
96600+ if (!new)
96601+ return -ENOMEM;
96602+
96603+ smp_wmb(); /* See comment in __pte_alloc */
96604+
96605+ spin_lock(&mm->page_table_lock);
96606+ if (pgd_present(*pgd)) /* Another has populated it */
96607+ pud_free(mm, new);
96608+ else
96609+ pgd_populate_kernel(mm, pgd, new);
96610+ spin_unlock(&mm->page_table_lock);
96611+ return 0;
96612+}
96613 #endif /* __PAGETABLE_PUD_FOLDED */
96614
96615 #ifndef __PAGETABLE_PMD_FOLDED
96616@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96617 spin_unlock(&mm->page_table_lock);
96618 return 0;
96619 }
96620+
96621+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96622+{
96623+ pmd_t *new = pmd_alloc_one(mm, address);
96624+ if (!new)
96625+ return -ENOMEM;
96626+
96627+ smp_wmb(); /* See comment in __pte_alloc */
96628+
96629+ spin_lock(&mm->page_table_lock);
96630+#ifndef __ARCH_HAS_4LEVEL_HACK
96631+ if (pud_present(*pud)) /* Another has populated it */
96632+ pmd_free(mm, new);
96633+ else
96634+ pud_populate_kernel(mm, pud, new);
96635+#else
96636+ if (pgd_present(*pud)) /* Another has populated it */
96637+ pmd_free(mm, new);
96638+ else
96639+ pgd_populate_kernel(mm, pud, new);
96640+#endif /* __ARCH_HAS_4LEVEL_HACK */
96641+ spin_unlock(&mm->page_table_lock);
96642+ return 0;
96643+}
96644 #endif /* __PAGETABLE_PMD_FOLDED */
96645
96646 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96647@@ -3550,8 +3850,8 @@ out:
96648 return ret;
96649 }
96650
96651-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96652- void *buf, int len, int write)
96653+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96654+ void *buf, size_t len, int write)
96655 {
96656 resource_size_t phys_addr;
96657 unsigned long prot = 0;
96658@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96659 * Access another process' address space as given in mm. If non-NULL, use the
96660 * given task for page fault accounting.
96661 */
96662-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96663- unsigned long addr, void *buf, int len, int write)
96664+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96665+ unsigned long addr, void *buf, size_t len, int write)
96666 {
96667 struct vm_area_struct *vma;
96668 void *old_buf = buf;
96669@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96670 down_read(&mm->mmap_sem);
96671 /* ignore errors, just check how much was successfully transferred */
96672 while (len) {
96673- int bytes, ret, offset;
96674+ ssize_t bytes, ret, offset;
96675 void *maddr;
96676 struct page *page = NULL;
96677
96678@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96679 *
96680 * The caller must hold a reference on @mm.
96681 */
96682-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96683- void *buf, int len, int write)
96684+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96685+ void *buf, size_t len, int write)
96686 {
96687 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96688 }
96689@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96690 * Source/target buffer must be kernel space,
96691 * Do not walk the page table directly, use get_user_pages
96692 */
96693-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96694- void *buf, int len, int write)
96695+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96696+ void *buf, size_t len, int write)
96697 {
96698 struct mm_struct *mm;
96699- int ret;
96700+ ssize_t ret;
96701
96702 mm = get_task_mm(tsk);
96703 if (!mm)
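
The tail of the mm/memory.c diff widens access_process_vm() and friends from int to ssize_t lengths and returns, so a length above INT_MAX can neither truncate nor go negative inside the copy loop. A tiny demonstration of the failure mode being closed (64-bit host assumed):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
    size_t  len      = 0x100000000UL;   /* 4 GiB request */
    int     as_int   = (int)len;        /* truncates to 0 */
    ssize_t as_ssize = (ssize_t)len;    /* preserved */

    printf("int: %d  ssize_t: %zd\n", as_int, as_ssize);
    return 0;
}
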
96704diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96705index 0e0961b..c9143b9 100644
96706--- a/mm/mempolicy.c
96707+++ b/mm/mempolicy.c
96708@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96709 unsigned long vmstart;
96710 unsigned long vmend;
96711
96712+#ifdef CONFIG_PAX_SEGMEXEC
96713+ struct vm_area_struct *vma_m;
96714+#endif
96715+
96716 vma = find_vma(mm, start);
96717 if (!vma || vma->vm_start > start)
96718 return -EFAULT;
96719@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96720 err = vma_replace_policy(vma, new_pol);
96721 if (err)
96722 goto out;
96723+
96724+#ifdef CONFIG_PAX_SEGMEXEC
96725+ vma_m = pax_find_mirror_vma(vma);
96726+ if (vma_m) {
96727+ err = vma_replace_policy(vma_m, new_pol);
96728+ if (err)
96729+ goto out;
96730+ }
96731+#endif
96732+
96733 }
96734
96735 out:
96736@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96737
96738 if (end < start)
96739 return -EINVAL;
96740+
96741+#ifdef CONFIG_PAX_SEGMEXEC
96742+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96743+ if (end > SEGMEXEC_TASK_SIZE)
96744+ return -EINVAL;
96745+ } else
96746+#endif
96747+
96748+ if (end > TASK_SIZE)
96749+ return -EINVAL;
96750+
96751 if (end == start)
96752 return 0;
96753
96754@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96755 */
96756 tcred = __task_cred(task);
96757 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96758- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96759- !capable(CAP_SYS_NICE)) {
96760+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96761 rcu_read_unlock();
96762 err = -EPERM;
96763 goto out_put;
96764@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96765 goto out;
96766 }
96767
96768+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96769+ if (mm != current->mm &&
96770+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96771+ mmput(mm);
96772+ err = -EPERM;
96773+ goto out;
96774+ }
96775+#endif
96776+
96777 err = do_migrate_pages(mm, old, new,
96778 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96779
96780diff --git a/mm/migrate.c b/mm/migrate.c
96781index 344cdf6..07399500 100644
96782--- a/mm/migrate.c
96783+++ b/mm/migrate.c
96784@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96785 */
96786 tcred = __task_cred(task);
96787 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96788- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96789- !capable(CAP_SYS_NICE)) {
96790+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96791 rcu_read_unlock();
96792 err = -EPERM;
96793 goto out;
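
The matching one-line hunks in mm/mempolicy.c (migrate_pages) and mm/migrate.c (move_pages) drop the `uid_eq(cred->uid, tcred->uid)` clause, so a bare real-UID match with the target no longer authorizes cross-process page migration: the caller's euid must match the target's real or saved UID, or the caller's real UID must match the target's saved UID, or it must hold CAP_SYS_NICE. A runnable model of the two predicates:

#include <stdbool.h>
#include <stdio.h>

struct ids { unsigned uid, euid, suid; };

static bool stock_allows(struct ids c, struct ids t)
{
    return c.euid == t.suid || c.euid == t.uid ||
           c.uid  == t.suid || c.uid  == t.uid;  /* clause the patch drops */
}

static bool patched_allows(struct ids c, struct ids t)
{
    return c.euid == t.suid || c.euid == t.uid || c.uid == t.suid;
}

int main(void)
{
    struct ids me     = { .uid = 1000, .euid = 2000, .suid = 2000 };
    struct ids target = { .uid = 1000, .euid = 3000, .suid = 3000 };

    /* 1 vs 0: a bare real-UID match no longer authorizes */
    printf("stock: %d  patched: %d\n",
           stock_allows(me, target), patched_allows(me, target));
    return 0;
}
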
96794diff --git a/mm/mlock.c b/mm/mlock.c
96795index 73cf098..ab547c7 100644
96796--- a/mm/mlock.c
96797+++ b/mm/mlock.c
96798@@ -14,6 +14,7 @@
96799 #include <linux/pagevec.h>
96800 #include <linux/mempolicy.h>
96801 #include <linux/syscalls.h>
96802+#include <linux/security.h>
96803 #include <linux/sched.h>
96804 #include <linux/export.h>
96805 #include <linux/rmap.h>
96806@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96807 {
96808 unsigned long nstart, end, tmp;
96809 struct vm_area_struct * vma, * prev;
96810- int error;
96811+ int error = 0;
96812
96813 VM_BUG_ON(start & ~PAGE_MASK);
96814 VM_BUG_ON(len != PAGE_ALIGN(len));
96815@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96816 return -EINVAL;
96817 if (end == start)
96818 return 0;
96819+ if (end > TASK_SIZE)
96820+ return -EINVAL;
96821+
96822 vma = find_vma(current->mm, start);
96823 if (!vma || vma->vm_start > start)
96824 return -ENOMEM;
96825@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96826 for (nstart = start ; ; ) {
96827 vm_flags_t newflags;
96828
96829+#ifdef CONFIG_PAX_SEGMEXEC
96830+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96831+ break;
96832+#endif
96833+
96834 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96835
96836 newflags = vma->vm_flags & ~VM_LOCKED;
96837@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96838 locked += current->mm->locked_vm;
96839
96840 /* check against resource limits */
96841+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96842 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96843 error = do_mlock(start, len, 1);
96844
96845@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96846 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96847 vm_flags_t newflags;
96848
96849+#ifdef CONFIG_PAX_SEGMEXEC
96850+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96851+ break;
96852+#endif
96853+
96854 newflags = vma->vm_flags & ~VM_LOCKED;
96855 if (flags & MCL_CURRENT)
96856 newflags |= VM_LOCKED;
96857@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96858 lock_limit >>= PAGE_SHIFT;
96859
96860 ret = -ENOMEM;
96861+
96862+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96863+
96864 down_write(&current->mm->mmap_sem);
96865-
96866 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96867 capable(CAP_IPC_LOCK))
96868 ret = do_mlockall(flags);
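
In mm/mlock.c the patch adds an explicit `end > TASK_SIZE` reject, skips vmas at or above SEGMEXEC_TASK_SIZE when walking under SEGMEXEC, and calls gr_learn_resource() before each RLIMIT_MEMLOCK test, presumably feeding grsecurity's learning mode so workable limits can be suggested. The byte/page bookkeeping those checks juggle, in miniature:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages assumed */

int main(void)
{
    unsigned long rlim_bytes = 64UL << 10;               /* RLIMIT_MEMLOCK */
    unsigned long lock_limit = rlim_bytes >> PAGE_SHIFT; /* 16 pages */
    unsigned long locked     = 20;                       /* pages wanted */

    printf("limit=%lu pages, want=%lu -> %s\n", lock_limit, locked,
           locked <= lock_limit ? "ok" : "over limit");
    return 0;
}
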
96869diff --git a/mm/mmap.c b/mm/mmap.c
96870index e5cc3ca..bb9333f 100644
96871--- a/mm/mmap.c
96872+++ b/mm/mmap.c
96873@@ -41,6 +41,7 @@
96874 #include <linux/notifier.h>
96875 #include <linux/memory.h>
96876 #include <linux/printk.h>
96877+#include <linux/random.h>
96878
96879 #include <asm/uaccess.h>
96880 #include <asm/cacheflush.h>
96881@@ -57,6 +58,16 @@
96882 #define arch_rebalance_pgtables(addr, len) (addr)
96883 #endif
96884
96885+static inline void verify_mm_writelocked(struct mm_struct *mm)
96886+{
96887+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96888+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96889+ up_read(&mm->mmap_sem);
96890+ BUG();
96891+ }
96892+#endif
96893+}
96894+
96895 static void unmap_region(struct mm_struct *mm,
96896 struct vm_area_struct *vma, struct vm_area_struct *prev,
96897 unsigned long start, unsigned long end);
96898@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96899 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96900 *
96901 */
96902-pgprot_t protection_map[16] = {
96903+pgprot_t protection_map[16] __read_only = {
96904 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96905 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96906 };
96907
96908-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96909+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96910 {
96911- return __pgprot(pgprot_val(protection_map[vm_flags &
96912+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96913 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96914 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96915+
96916+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96917+ if (!(__supported_pte_mask & _PAGE_NX) &&
96918+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96919+ (vm_flags & (VM_READ | VM_WRITE)))
96920+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96921+#endif
96922+
96923+ return prot;
96924 }
96925 EXPORT_SYMBOL(vm_get_page_prot);
96926
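
Above, protection_map[] becomes __read_only and vm_get_page_prot() grows a PAGEEXEC fixup for non-NX i386: a PAGEEXEC-but-not-EXEC mapping gets its executability stripped at the PTE level instead. The table itself is indexed by the low four protection bits, as this sketch shows (real flag values, but illustrative code):

#include <stdio.h>

/* real vm_flags values, reproduced for illustration */
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

int main(void)
{
    unsigned long vm_flags = VM_READ | VM_WRITE;   /* private rw */
    unsigned idx = (unsigned)(vm_flags &
                   (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED));

    printf("protection_map index: %u of 16\n", idx);
    /* with the table __read_only, an exploit can no longer rewrite
     * these page-protection templates at runtime */
    return 0;
}
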
96927@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96928 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96929 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96930 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96931+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96932 /*
96933 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96934 * other variables. It can be updated by several CPUs frequently.
96935@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96936 struct vm_area_struct *next = vma->vm_next;
96937
96938 might_sleep();
96939+ BUG_ON(vma->vm_mirror);
96940 if (vma->vm_ops && vma->vm_ops->close)
96941 vma->vm_ops->close(vma);
96942 if (vma->vm_file)
96943@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
96944
96945 SYSCALL_DEFINE1(brk, unsigned long, brk)
96946 {
96947+ unsigned long rlim;
96948 unsigned long retval;
96949 unsigned long newbrk, oldbrk;
96950 struct mm_struct *mm = current->mm;
96951@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96952 * segment grow beyond its set limit the in case where the limit is
96953 * not page aligned -Ram Gupta
96954 */
96955- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
96956+ rlim = rlimit(RLIMIT_DATA);
96957+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96958+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96959+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96960+ rlim = 4096 * PAGE_SIZE;
96961+#endif
96962+ if (check_data_rlimit(rlim, brk, mm->start_brk,
96963 mm->end_data, mm->start_data))
96964 goto out;
96965
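
The brk hunk substitutes an adjusted rlim for the raw RLIMIT_DATA when GRKERNSEC_PROC_MEMMAP is on: a setuid/setgid process whose data limit was squeezed below one page still gets a working heap, per the comment's 16 MB floor. The arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long rlim = 0;             /* hostile, near-zero RLIMIT_DATA */

    if (rlim < PAGE_SIZE)               /* the patch's condition */
        rlim = 4096 * PAGE_SIZE;        /* 16 MiB floor */
    printf("effective data limit: %lu MiB\n", rlim >> 20);
    return 0;
}
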
96966@@ -978,6 +1007,12 @@ static int
96967 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96968 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96969 {
96970+
96971+#ifdef CONFIG_PAX_SEGMEXEC
96972+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96973+ return 0;
96974+#endif
96975+
96976 if (is_mergeable_vma(vma, file, vm_flags) &&
96977 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96978 if (vma->vm_pgoff == vm_pgoff)
96979@@ -997,6 +1032,12 @@ static int
96980 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96981 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96982 {
96983+
96984+#ifdef CONFIG_PAX_SEGMEXEC
96985+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96986+ return 0;
96987+#endif
96988+
96989 if (is_mergeable_vma(vma, file, vm_flags) &&
96990 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96991 pgoff_t vm_pglen;
96992@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96993 struct vm_area_struct *area, *next;
96994 int err;
96995
96996+#ifdef CONFIG_PAX_SEGMEXEC
96997+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96998+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96999+
97000+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97001+#endif
97002+
97003 /*
97004 * We later require that vma->vm_flags == vm_flags,
97005 * so this tests vma->vm_flags & VM_SPECIAL, too.
97006@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97007 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97008 next = next->vm_next;
97009
97010+#ifdef CONFIG_PAX_SEGMEXEC
97011+ if (prev)
97012+ prev_m = pax_find_mirror_vma(prev);
97013+ if (area)
97014+ area_m = pax_find_mirror_vma(area);
97015+ if (next)
97016+ next_m = pax_find_mirror_vma(next);
97017+#endif
97018+
97019 /*
97020 * Can it merge with the predecessor?
97021 */
97022@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97023 /* cases 1, 6 */
97024 err = vma_adjust(prev, prev->vm_start,
97025 next->vm_end, prev->vm_pgoff, NULL);
97026- } else /* cases 2, 5, 7 */
97027+
97028+#ifdef CONFIG_PAX_SEGMEXEC
97029+ if (!err && prev_m)
97030+ err = vma_adjust(prev_m, prev_m->vm_start,
97031+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97032+#endif
97033+
97034+ } else { /* cases 2, 5, 7 */
97035 err = vma_adjust(prev, prev->vm_start,
97036 end, prev->vm_pgoff, NULL);
97037+
97038+#ifdef CONFIG_PAX_SEGMEXEC
97039+ if (!err && prev_m)
97040+ err = vma_adjust(prev_m, prev_m->vm_start,
97041+ end_m, prev_m->vm_pgoff, NULL);
97042+#endif
97043+
97044+ }
97045 if (err)
97046 return NULL;
97047 khugepaged_enter_vma_merge(prev, vm_flags);
97048@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97049 mpol_equal(policy, vma_policy(next)) &&
97050 can_vma_merge_before(next, vm_flags,
97051 anon_vma, file, pgoff+pglen)) {
97052- if (prev && addr < prev->vm_end) /* case 4 */
97053+ if (prev && addr < prev->vm_end) { /* case 4 */
97054 err = vma_adjust(prev, prev->vm_start,
97055 addr, prev->vm_pgoff, NULL);
97056- else /* cases 3, 8 */
97057+
97058+#ifdef CONFIG_PAX_SEGMEXEC
97059+ if (!err && prev_m)
97060+ err = vma_adjust(prev_m, prev_m->vm_start,
97061+ addr_m, prev_m->vm_pgoff, NULL);
97062+#endif
97063+
97064+ } else { /* cases 3, 8 */
97065 err = vma_adjust(area, addr, next->vm_end,
97066 next->vm_pgoff - pglen, NULL);
97067+
97068+#ifdef CONFIG_PAX_SEGMEXEC
97069+ if (!err && area_m)
97070+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97071+ next_m->vm_pgoff - pglen, NULL);
97072+#endif
97073+
97074+ }
97075 if (err)
97076 return NULL;
97077 khugepaged_enter_vma_merge(area, vm_flags);
97078@@ -1210,8 +1297,10 @@ none:
97079 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97080 struct file *file, long pages)
97081 {
97082- const unsigned long stack_flags
97083- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97084+
97085+#ifdef CONFIG_PAX_RANDMMAP
97086+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97087+#endif
97088
97089 mm->total_vm += pages;
97090
97091@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97092 mm->shared_vm += pages;
97093 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97094 mm->exec_vm += pages;
97095- } else if (flags & stack_flags)
97096+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97097 mm->stack_vm += pages;
97098 }
97099 #endif /* CONFIG_PROC_FS */
97100@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97101 locked += mm->locked_vm;
97102 lock_limit = rlimit(RLIMIT_MEMLOCK);
97103 lock_limit >>= PAGE_SHIFT;
97104+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97105 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97106 return -EAGAIN;
97107 }
97108@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97109 * (the exception is when the underlying filesystem is noexec
97110 * mounted, in which case we dont add PROT_EXEC.)
97111 */
97112- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97113+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97114 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97115 prot |= PROT_EXEC;
97116
97117@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97118 /* Obtain the address to map to. we verify (or select) it and ensure
97119 * that it represents a valid section of the address space.
97120 */
97121- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97122+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97123 if (addr & ~PAGE_MASK)
97124 return addr;
97125
97126@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97127 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97128 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97129
97130+#ifdef CONFIG_PAX_MPROTECT
97131+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97132+
97133+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97134+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97135+ mm->binfmt->handle_mmap)
97136+ mm->binfmt->handle_mmap(file);
97137+#endif
97138+
97139+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97140+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97141+ gr_log_rwxmmap(file);
97142+
97143+#ifdef CONFIG_PAX_EMUPLT
97144+ vm_flags &= ~VM_EXEC;
97145+#else
97146+ return -EPERM;
97147+#endif
97148+
97149+ }
97150+
97151+ if (!(vm_flags & VM_EXEC))
97152+ vm_flags &= ~VM_MAYEXEC;
97153+#else
97154+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97155+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97156+#endif
97157+ else
97158+ vm_flags &= ~VM_MAYWRITE;
97159+ }
97160+#endif
97161+
97162+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97163+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97164+ vm_flags &= ~VM_PAGEEXEC;
97165+#endif
97166+
97167 if (flags & MAP_LOCKED)
97168 if (!can_do_mlock())
97169 return -EPERM;
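
The large do_mmap_pgoff() hunk above is PAX_MPROTECT's front door: W|X requests are logged and either degraded (EMUPLT) or refused, and the corresponding VM_MAY* bits are cleared so mprotect() cannot reintroduce what mmap() denied. A compact model of the flag scrubbing (real VM_* values, simplified control flow):

#include <stdio.h>

/* real vm_flags values, reproduced for illustration */
#define VM_WRITE    0x02UL
#define VM_EXEC     0x04UL
#define VM_MAYWRITE 0x20UL
#define VM_MAYEXEC  0x40UL

static unsigned long scrub(unsigned long f)
{
    if ((f & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
        f &= ~VM_EXEC;          /* EMUPLT path; otherwise -EPERM */
    if (!(f & VM_EXEC))
        f &= ~VM_MAYEXEC;       /* mprotect() can't add PROT_EXEC later */
    else
        f &= ~VM_MAYWRITE;      /* ...or PROT_WRITE to executable maps */
    return f;
}

int main(void)
{
    unsigned long wx = VM_WRITE | VM_EXEC | VM_MAYWRITE | VM_MAYEXEC;
    printf("%#lx -> %#lx\n", wx, scrub(wx));   /* 0x66 -> 0x22 */
    return 0;
}
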
97170@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97171 vm_flags |= VM_NORESERVE;
97172 }
97173
97174+ if (!gr_acl_handle_mmap(file, prot))
97175+ return -EACCES;
97176+
97177 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97178 if (!IS_ERR_VALUE(addr) &&
97179 ((vm_flags & VM_LOCKED) ||
97180@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97181 vm_flags_t vm_flags = vma->vm_flags;
97182
97183 /* If it was private or non-writable, the write bit is already clear */
97184- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97185+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97186 return 0;
97187
97188 /* The backer wishes to know when pages are first written to? */
97189@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97190 struct rb_node **rb_link, *rb_parent;
97191 unsigned long charged = 0;
97192
97193+#ifdef CONFIG_PAX_SEGMEXEC
97194+ struct vm_area_struct *vma_m = NULL;
97195+#endif
97196+
97197+ /*
97198+ * mm->mmap_sem is required to protect against another thread
97199+ * changing the mappings in case we sleep.
97200+ */
97201+ verify_mm_writelocked(mm);
97202+
97203 /* Check against address space limit. */
97204+
97205+#ifdef CONFIG_PAX_RANDMMAP
97206+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97207+#endif
97208+
97209 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97210 unsigned long nr_pages;
97211
97212@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97213
97214 /* Clear old maps */
97215 error = -ENOMEM;
97216-munmap_back:
97217 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97218 if (do_munmap(mm, addr, len))
97219 return -ENOMEM;
97220- goto munmap_back;
97221+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97222 }
97223
97224 /*
97225@@ -1597,6 +1741,16 @@ munmap_back:
97226 goto unacct_error;
97227 }
97228
97229+#ifdef CONFIG_PAX_SEGMEXEC
97230+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97231+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97232+ if (!vma_m) {
97233+ error = -ENOMEM;
97234+ goto free_vma;
97235+ }
97236+ }
97237+#endif
97238+
97239 vma->vm_mm = mm;
97240 vma->vm_start = addr;
97241 vma->vm_end = addr + len;
97242@@ -1627,6 +1781,13 @@ munmap_back:
97243 if (error)
97244 goto unmap_and_free_vma;
97245
97246+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97247+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97248+ vma->vm_flags |= VM_PAGEEXEC;
97249+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97250+ }
97251+#endif
97252+
97253 /* Can addr have changed??
97254 *
97255 * Answer: Yes, several device drivers can do it in their
97256@@ -1645,6 +1806,12 @@ munmap_back:
97257 }
97258
97259 vma_link(mm, vma, prev, rb_link, rb_parent);
97260+
97261+#ifdef CONFIG_PAX_SEGMEXEC
97262+ if (vma_m)
97263+ BUG_ON(pax_mirror_vma(vma_m, vma));
97264+#endif
97265+
97266 /* Once vma denies write, undo our temporary denial count */
97267 if (file) {
97268 if (vm_flags & VM_SHARED)
97269@@ -1657,6 +1824,7 @@ out:
97270 perf_event_mmap(vma);
97271
97272 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97273+ track_exec_limit(mm, addr, addr + len, vm_flags);
97274 if (vm_flags & VM_LOCKED) {
97275 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97276 vma == get_gate_vma(current->mm)))
97277@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
97278 if (vm_flags & VM_DENYWRITE)
97279 allow_write_access(file);
97280 free_vma:
97281+
97282+#ifdef CONFIG_PAX_SEGMEXEC
97283+ if (vma_m)
97284+ kmem_cache_free(vm_area_cachep, vma_m);
97285+#endif
97286+
97287 kmem_cache_free(vm_area_cachep, vma);
97288 unacct_error:
97289 if (charged)
97290@@ -1701,7 +1875,63 @@ unacct_error:
97291 return error;
97292 }
97293
97294-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97295+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97296+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97297+{
97298+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97299+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97300+
97301+ return 0;
97302+}
97303+#endif
97304+
97305+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97306+{
97307+ if (!vma) {
97308+#ifdef CONFIG_STACK_GROWSUP
97309+ if (addr > sysctl_heap_stack_gap)
97310+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97311+ else
97312+ vma = find_vma(current->mm, 0);
97313+ if (vma && (vma->vm_flags & VM_GROWSUP))
97314+ return false;
97315+#endif
97316+ return true;
97317+ }
97318+
97319+ if (addr + len > vma->vm_start)
97320+ return false;
97321+
97322+ if (vma->vm_flags & VM_GROWSDOWN)
97323+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97324+#ifdef CONFIG_STACK_GROWSUP
97325+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97326+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97327+#endif
97328+ else if (offset)
97329+ return offset <= vma->vm_start - addr - len;
97330+
97331+ return true;
97332+}
97333+
97334+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97335+{
97336+ if (vma->vm_start < len)
97337+ return -ENOMEM;
97338+
97339+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97340+ if (offset <= vma->vm_start - len)
97341+ return vma->vm_start - len - offset;
97342+ else
97343+ return -ENOMEM;
97344+ }
97345+
97346+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97347+ return vma->vm_start - len - sysctl_heap_stack_gap;
97348+ return -ENOMEM;
97349+}
97350+
97351+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97352 {
97353 /*
97354 * We implement the search by looking for an rbtree node that
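
check_heap_stack_gap() and skip_heap_stack_gap(), added above, enforce sysctl_heap_stack_gap (default 64 KiB, set earlier in this diff) between any candidate mapping and an adjacent growing stack, and the unmapped_area() walkers below shrink each rbtree gap accordingly. The core predicate for the common grows-down case, runnable:

#include <stdbool.h>
#include <stdio.h>

static unsigned long heap_stack_gap = 64 * 1024;   /* patch default */

/* the common case: candidate [addr, addr+len) below a grows-down stack */
static bool gap_ok(unsigned long addr, unsigned long len,
                   unsigned long stack_start)
{
    if (addr + len > stack_start)
        return false;                   /* overlaps the stack vma itself */
    return heap_stack_gap <= stack_start - addr - len;
}

int main(void)
{
    unsigned long stack = 0xBF000000UL;

    printf("%d\n", gap_ok(stack - 0x20000, 0x10000, stack));  /* 1: 64 KiB kept */
    printf("%d\n", gap_ok(stack - 0x11000, 0x10000, stack));  /* 0: 4 KiB left */
    return 0;
}
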
97355@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97356 }
97357 }
97358
97359- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97360+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97361 check_current:
97362 /* Check if current node has a suitable gap */
97363 if (gap_start > high_limit)
97364 return -ENOMEM;
97365+
97366+ if (gap_end - gap_start > info->threadstack_offset)
97367+ gap_start += info->threadstack_offset;
97368+ else
97369+ gap_start = gap_end;
97370+
97371+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97372+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97373+ gap_start += sysctl_heap_stack_gap;
97374+ else
97375+ gap_start = gap_end;
97376+ }
97377+ if (vma->vm_flags & VM_GROWSDOWN) {
97378+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97379+ gap_end -= sysctl_heap_stack_gap;
97380+ else
97381+ gap_end = gap_start;
97382+ }
97383 if (gap_end >= low_limit && gap_end - gap_start >= length)
97384 goto found;
97385
97386@@ -1803,7 +2051,7 @@ found:
97387 return gap_start;
97388 }
97389
97390-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97391+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97392 {
97393 struct mm_struct *mm = current->mm;
97394 struct vm_area_struct *vma;
97395@@ -1857,6 +2105,24 @@ check_current:
97396 gap_end = vma->vm_start;
97397 if (gap_end < low_limit)
97398 return -ENOMEM;
97399+
97400+ if (gap_end - gap_start > info->threadstack_offset)
97401+ gap_end -= info->threadstack_offset;
97402+ else
97403+ gap_end = gap_start;
97404+
97405+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97406+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97407+ gap_start += sysctl_heap_stack_gap;
97408+ else
97409+ gap_start = gap_end;
97410+ }
97411+ if (vma->vm_flags & VM_GROWSDOWN) {
97412+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97413+ gap_end -= sysctl_heap_stack_gap;
97414+ else
97415+ gap_end = gap_start;
97416+ }
97417 if (gap_start <= high_limit && gap_end - gap_start >= length)
97418 goto found;
97419
97420@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97421 struct mm_struct *mm = current->mm;
97422 struct vm_area_struct *vma;
97423 struct vm_unmapped_area_info info;
97424+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97425
97426 if (len > TASK_SIZE - mmap_min_addr)
97427 return -ENOMEM;
97428@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97429 if (flags & MAP_FIXED)
97430 return addr;
97431
97432+#ifdef CONFIG_PAX_RANDMMAP
97433+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97434+#endif
97435+
97436 if (addr) {
97437 addr = PAGE_ALIGN(addr);
97438 vma = find_vma(mm, addr);
97439 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97440- (!vma || addr + len <= vma->vm_start))
97441+ check_heap_stack_gap(vma, addr, len, offset))
97442 return addr;
97443 }
97444
97445@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97446 info.low_limit = mm->mmap_base;
97447 info.high_limit = TASK_SIZE;
97448 info.align_mask = 0;
97449+ info.threadstack_offset = offset;
97450 return vm_unmapped_area(&info);
97451 }
97452 #endif
97453@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97454 struct mm_struct *mm = current->mm;
97455 unsigned long addr = addr0;
97456 struct vm_unmapped_area_info info;
97457+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97458
97459 /* requested length too big for entire address space */
97460 if (len > TASK_SIZE - mmap_min_addr)
97461@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97462 if (flags & MAP_FIXED)
97463 return addr;
97464
97465+#ifdef CONFIG_PAX_RANDMMAP
97466+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97467+#endif
97468+
97469 /* requesting a specific address */
97470 if (addr) {
97471 addr = PAGE_ALIGN(addr);
97472 vma = find_vma(mm, addr);
97473 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97474- (!vma || addr + len <= vma->vm_start))
97475+ check_heap_stack_gap(vma, addr, len, offset))
97476 return addr;
97477 }
97478
97479@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97480 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97481 info.high_limit = mm->mmap_base;
97482 info.align_mask = 0;
97483+ info.threadstack_offset = offset;
97484 addr = vm_unmapped_area(&info);
97485
97486 /*
97487@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97488 VM_BUG_ON(addr != -ENOMEM);
97489 info.flags = 0;
97490 info.low_limit = TASK_UNMAPPED_BASE;
97491+
97492+#ifdef CONFIG_PAX_RANDMMAP
97493+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97494+ info.low_limit += mm->delta_mmap;
97495+#endif
97496+
97497 info.high_limit = TASK_SIZE;
97498 addr = vm_unmapped_area(&info);
97499 }
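
Both arch_get_unmapped_area() variants now thread a gr_rand_threadstack_offset() value through vm_unmapped_area(); per the helper added earlier, the offset only applies to anonymous MAP_STACK requests under RANDMMAP and spans 1 to 256 pages. A userspace model of its range:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12   /* 4 KiB pages assumed */

/* model of gr_rand_threadstack_offset(): 1..256 pages of slack */
static unsigned long rand_threadstack_offset(void)
{
    return (((unsigned long)rand() & 0xFF) + 1) << PAGE_SHIFT;
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("offset: %#lx\n", rand_threadstack_offset());
    /* range: 0x1000 .. 0x100000 (4 KiB to 1 MiB) */
    return 0;
}
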
97500@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97501 return vma;
97502 }
97503
97504+#ifdef CONFIG_PAX_SEGMEXEC
97505+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97506+{
97507+ struct vm_area_struct *vma_m;
97508+
97509+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97510+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97511+ BUG_ON(vma->vm_mirror);
97512+ return NULL;
97513+ }
97514+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97515+ vma_m = vma->vm_mirror;
97516+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97517+ BUG_ON(vma->vm_file != vma_m->vm_file);
97518+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97519+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97520+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97521+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97522+ return vma_m;
97523+}
97524+#endif
97525+
97526 /*
97527 * Verify that the stack growth is acceptable and
97528 * update accounting. This is shared with both the
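
pax_find_mirror_vma(), added above, is less a lookup than a consistency audit: a SEGMEXEC executable vma must have a back-linked mirror of identical size and file offset in the upper half, and any violation is a BUG. Those invariants in miniature, with an illustrative split constant:

#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL   /* illustrative split */

struct toy_vma {
    unsigned long start, end, pgoff;
    struct toy_vma *mirror;
};

static void check_pair(const struct toy_vma *v)
{
    const struct toy_vma *m = v->mirror;

    assert(m && m->mirror == v);                       /* back-linked */
    assert(v->end - v->start == m->end - m->start);    /* equal size */
    assert(v->pgoff == m->pgoff);                      /* same file offset */
    assert(m->start == v->start + SEGMEXEC_TASK_SIZE); /* fixed distance */
}

int main(void)
{
    struct toy_vma lo = { 0x08048000UL, 0x08050000UL, 0, NULL };
    struct toy_vma hi = { lo.start + SEGMEXEC_TASK_SIZE,
                          lo.end   + SEGMEXEC_TASK_SIZE, 0, &lo };

    lo.mirror = &hi;
    check_pair(&lo);
    puts("mirror invariants hold");
    return 0;
}
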
97529@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97530
97531 /* Stack limit test */
97532 actual_size = size;
97533- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97534- actual_size -= PAGE_SIZE;
97535+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97536 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97537 return -ENOMEM;
97538
97539@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97540 locked = mm->locked_vm + grow;
97541 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97542 limit >>= PAGE_SHIFT;
97543+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97544 if (locked > limit && !capable(CAP_IPC_LOCK))
97545 return -ENOMEM;
97546 }
97547@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97548 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97549 * vma is the last one with address > vma->vm_end. Have to extend vma.
97550 */
97551+#ifndef CONFIG_IA64
97552+static
97553+#endif
97554 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97555 {
97556 int error;
97557+ bool locknext;
97558
97559 if (!(vma->vm_flags & VM_GROWSUP))
97560 return -EFAULT;
97561
97562+ /* Also guard against wrapping around to address 0. */
97563+ if (address < PAGE_ALIGN(address+1))
97564+ address = PAGE_ALIGN(address+1);
97565+ else
97566+ return -ENOMEM;
97567+
97568 /*
97569 * We must make sure the anon_vma is allocated
97570 * so that the anon_vma locking is not a noop.
97571 */
97572 if (unlikely(anon_vma_prepare(vma)))
97573 return -ENOMEM;
97574+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97575+ if (locknext && anon_vma_prepare(vma->vm_next))
97576+ return -ENOMEM;
97577 vma_lock_anon_vma(vma);
97578+ if (locknext)
97579+ vma_lock_anon_vma(vma->vm_next);
97580
97581 /*
97582 * vma->vm_start/vm_end cannot change under us because the caller
97583 * is required to hold the mmap_sem in read mode. We need the
97584- * anon_vma lock to serialize against concurrent expand_stacks.
97585- * Also guard against wrapping around to address 0.
97586+ * anon_vma locks to serialize against concurrent expand_stacks
97587+ * and expand_upwards.
97588 */
97589- if (address < PAGE_ALIGN(address+4))
97590- address = PAGE_ALIGN(address+4);
97591- else {
97592- vma_unlock_anon_vma(vma);
97593- return -ENOMEM;
97594- }
97595 error = 0;
97596
97597 /* Somebody else might have raced and expanded it already */
97598- if (address > vma->vm_end) {
97599+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97600+ error = -ENOMEM;
97601+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97602 unsigned long size, grow;
97603
97604 size = address - vma->vm_start;
97605@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97606 }
97607 }
97608 }
97609+ if (locknext)
97610+ vma_unlock_anon_vma(vma->vm_next);
97611 vma_unlock_anon_vma(vma);
97612 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97613 validate_mm(vma->vm_mm);
97614@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
97615 unsigned long address)
97616 {
97617 int error;
97618+ bool lockprev = false;
97619+ struct vm_area_struct *prev;
97620
97621 /*
97622 * We must make sure the anon_vma is allocated
97623@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
97624 if (error)
97625 return error;
97626
97627+ prev = vma->vm_prev;
97628+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97629+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97630+#endif
97631+ if (lockprev && anon_vma_prepare(prev))
97632+ return -ENOMEM;
97633+ if (lockprev)
97634+ vma_lock_anon_vma(prev);
97635+
97636 vma_lock_anon_vma(vma);
97637
97638 /*
97639@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
97640 */
97641
97642 /* Somebody else might have raced and expanded it already */
97643- if (address < vma->vm_start) {
97644+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97645+ error = -ENOMEM;
97646+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97647 unsigned long size, grow;
97648
97649+#ifdef CONFIG_PAX_SEGMEXEC
97650+ struct vm_area_struct *vma_m;
97651+
97652+ vma_m = pax_find_mirror_vma(vma);
97653+#endif
97654+
97655 size = vma->vm_end - address;
97656 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97657
97658@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
97659 vma->vm_pgoff -= grow;
97660 anon_vma_interval_tree_post_update_vma(vma);
97661 vma_gap_update(vma);
97662+
97663+#ifdef CONFIG_PAX_SEGMEXEC
97664+ if (vma_m) {
97665+ anon_vma_interval_tree_pre_update_vma(vma_m);
97666+ vma_m->vm_start -= grow << PAGE_SHIFT;
97667+ vma_m->vm_pgoff -= grow;
97668+ anon_vma_interval_tree_post_update_vma(vma_m);
97669+ vma_gap_update(vma_m);
97670+ }
97671+#endif
97672+
97673 spin_unlock(&vma->vm_mm->page_table_lock);
97674
97675+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97676 perf_event_mmap(vma);
97677 }
97678 }
97679 }
97680 vma_unlock_anon_vma(vma);
97681+ if (lockprev)
97682+ vma_unlock_anon_vma(prev);
97683 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97684 validate_mm(vma->vm_mm);
97685 return error;
97686@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97687 do {
97688 long nrpages = vma_pages(vma);
97689
97690+#ifdef CONFIG_PAX_SEGMEXEC
97691+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97692+ vma = remove_vma(vma);
97693+ continue;
97694+ }
97695+#endif
97696+
97697 if (vma->vm_flags & VM_ACCOUNT)
97698 nr_accounted += nrpages;
97699 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97700@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97701 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97702 vma->vm_prev = NULL;
97703 do {
97704+
97705+#ifdef CONFIG_PAX_SEGMEXEC
97706+ if (vma->vm_mirror) {
97707+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97708+ vma->vm_mirror->vm_mirror = NULL;
97709+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97710+ vma->vm_mirror = NULL;
97711+ }
97712+#endif
97713+
97714 vma_rb_erase(vma, &mm->mm_rb);
97715 mm->map_count--;
97716 tail_vma = vma;
97717@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97718 struct vm_area_struct *new;
97719 int err = -ENOMEM;
97720
97721+#ifdef CONFIG_PAX_SEGMEXEC
97722+ struct vm_area_struct *vma_m, *new_m = NULL;
97723+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97724+#endif
97725+
97726 if (is_vm_hugetlb_page(vma) && (addr &
97727 ~(huge_page_mask(hstate_vma(vma)))))
97728 return -EINVAL;
97729
97730+#ifdef CONFIG_PAX_SEGMEXEC
97731+ vma_m = pax_find_mirror_vma(vma);
97732+#endif
97733+
97734 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97735 if (!new)
97736 goto out_err;
97737
97738+#ifdef CONFIG_PAX_SEGMEXEC
97739+ if (vma_m) {
97740+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97741+ if (!new_m) {
97742+ kmem_cache_free(vm_area_cachep, new);
97743+ goto out_err;
97744+ }
97745+ }
97746+#endif
97747+
97748 /* most fields are the same, copy all, and then fixup */
97749 *new = *vma;
97750
97751@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97752 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97753 }
97754
97755+#ifdef CONFIG_PAX_SEGMEXEC
97756+ if (vma_m) {
97757+ *new_m = *vma_m;
97758+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97759+ new_m->vm_mirror = new;
97760+ new->vm_mirror = new_m;
97761+
97762+ if (new_below)
97763+ new_m->vm_end = addr_m;
97764+ else {
97765+ new_m->vm_start = addr_m;
97766+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97767+ }
97768+ }
97769+#endif
97770+
97771 err = vma_dup_policy(vma, new);
97772 if (err)
97773 goto out_free_vma;
97774@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97775 else
97776 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97777
97778+#ifdef CONFIG_PAX_SEGMEXEC
97779+ if (!err && vma_m) {
97780+ struct mempolicy *pol = vma_policy(new);
97781+
97782+ if (anon_vma_clone(new_m, vma_m))
97783+ goto out_free_mpol;
97784+
97785+ mpol_get(pol);
97786+ set_vma_policy(new_m, pol);
97787+
97788+ if (new_m->vm_file)
97789+ get_file(new_m->vm_file);
97790+
97791+ if (new_m->vm_ops && new_m->vm_ops->open)
97792+ new_m->vm_ops->open(new_m);
97793+
97794+ if (new_below)
97795+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97796+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97797+ else
97798+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97799+
97800+ if (err) {
97801+ if (new_m->vm_ops && new_m->vm_ops->close)
97802+ new_m->vm_ops->close(new_m);
97803+ if (new_m->vm_file)
97804+ fput(new_m->vm_file);
97805+ mpol_put(pol);
97806+ }
97807+ }
97808+#endif
97809+
97810 /* Success. */
97811 if (!err)
97812 return 0;
97813@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97814 new->vm_ops->close(new);
97815 if (new->vm_file)
97816 fput(new->vm_file);
97817- unlink_anon_vmas(new);
97818 out_free_mpol:
97819 mpol_put(vma_policy(new));
97820 out_free_vma:
97821+
97822+#ifdef CONFIG_PAX_SEGMEXEC
97823+ if (new_m) {
97824+ unlink_anon_vmas(new_m);
97825+ kmem_cache_free(vm_area_cachep, new_m);
97826+ }
97827+#endif
97828+
97829+ unlink_anon_vmas(new);
97830 kmem_cache_free(vm_area_cachep, new);
97831 out_err:
97832 return err;
97833@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97834 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97835 unsigned long addr, int new_below)
97836 {
97837+
97838+#ifdef CONFIG_PAX_SEGMEXEC
97839+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97840+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97841+ if (mm->map_count >= sysctl_max_map_count-1)
97842+ return -ENOMEM;
97843+ } else
97844+#endif
97845+
97846 if (mm->map_count >= sysctl_max_map_count)
97847 return -ENOMEM;
97848
97849@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97850 * work. This now handles partial unmappings.
97851 * Jeremy Fitzhardinge <jeremy@goop.org>
97852 */
97853+#ifdef CONFIG_PAX_SEGMEXEC
97854 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97855 {
97856+ int ret = __do_munmap(mm, start, len);
97857+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97858+ return ret;
97859+
97860+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97861+}
97862+
97863+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97864+#else
97865+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97866+#endif
97867+{
97868 unsigned long end;
97869 struct vm_area_struct *vma, *prev, *last;
97870
97871+ /*
97872+ * mm->mmap_sem is required to protect against another thread
97873+ * changing the mappings in case we sleep.
97874+ */
97875+ verify_mm_writelocked(mm);
97876+
97877 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97878 return -EINVAL;
97879
97880@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97881 /* Fix up all other VM information */
97882 remove_vma_list(mm, vma);
97883
97884+ track_exec_limit(mm, start, end, 0UL);
97885+
97886 return 0;
97887 }
97888
97889@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
97890 int ret;
97891 struct mm_struct *mm = current->mm;
97892
97893+
97894+#ifdef CONFIG_PAX_SEGMEXEC
97895+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97896+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97897+ return -EINVAL;
97898+#endif
97899+
97900 down_write(&mm->mmap_sem);
97901 ret = do_munmap(mm, start, len);
97902 up_write(&mm->mmap_sem);
97903@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97904 return vm_munmap(addr, len);
97905 }
97906
97907-static inline void verify_mm_writelocked(struct mm_struct *mm)
97908-{
97909-#ifdef CONFIG_DEBUG_VM
97910- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97911- WARN_ON(1);
97912- up_read(&mm->mmap_sem);
97913- }
97914-#endif
97915-}
97916-
97917 /*
97918 * this is really a simplified "do_mmap". it only handles
97919 * anonymous maps. eventually we may be able to do some
97920@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97921 struct rb_node **rb_link, *rb_parent;
97922 pgoff_t pgoff = addr >> PAGE_SHIFT;
97923 int error;
97924+ unsigned long charged;
97925
97926 len = PAGE_ALIGN(len);
97927 if (!len)
97928@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97929
97930 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97931
97932+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97933+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97934+ flags &= ~VM_EXEC;
97935+
97936+#ifdef CONFIG_PAX_MPROTECT
97937+ if (mm->pax_flags & MF_PAX_MPROTECT)
97938+ flags &= ~VM_MAYEXEC;
97939+#endif
97940+
97941+ }
97942+#endif
97943+
97944 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97945 if (error & ~PAGE_MASK)
97946 return error;
97947
97948+ charged = len >> PAGE_SHIFT;
97949+
97950 error = mlock_future_check(mm, mm->def_flags, len);
97951 if (error)
97952 return error;
97953@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97954 /*
97955 * Clear old maps. this also does some error checking for us
97956 */
97957- munmap_back:
97958 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97959 if (do_munmap(mm, addr, len))
97960 return -ENOMEM;
97961- goto munmap_back;
97962+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97963 }
97964
97965 /* Check against address space limits *after* clearing old maps... */
97966- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97967+ if (!may_expand_vm(mm, charged))
97968 return -ENOMEM;
97969
97970 if (mm->map_count > sysctl_max_map_count)
97971 return -ENOMEM;
97972
97973- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97974+ if (security_vm_enough_memory_mm(mm, charged))
97975 return -ENOMEM;
97976
97977 /* Can we just expand an old private anonymous mapping? */
97978@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97979 */
97980 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97981 if (!vma) {
97982- vm_unacct_memory(len >> PAGE_SHIFT);
97983+ vm_unacct_memory(charged);
97984 return -ENOMEM;
97985 }
97986
97987@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97988 vma_link(mm, vma, prev, rb_link, rb_parent);
97989 out:
97990 perf_event_mmap(vma);
97991- mm->total_vm += len >> PAGE_SHIFT;
97992+ mm->total_vm += charged;
97993 if (flags & VM_LOCKED)
97994- mm->locked_vm += (len >> PAGE_SHIFT);
97995+ mm->locked_vm += charged;
97996 vma->vm_flags |= VM_SOFTDIRTY;
97997+ track_exec_limit(mm, addr, addr + len, flags);
97998 return addr;
97999 }
98000
98001@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
98002 while (vma) {
98003 if (vma->vm_flags & VM_ACCOUNT)
98004 nr_accounted += vma_pages(vma);
98005+ vma->vm_mirror = NULL;
98006 vma = remove_vma(vma);
98007 }
98008 vm_unacct_memory(nr_accounted);
98009@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98010 struct vm_area_struct *prev;
98011 struct rb_node **rb_link, *rb_parent;
98012
98013+#ifdef CONFIG_PAX_SEGMEXEC
98014+ struct vm_area_struct *vma_m = NULL;
98015+#endif
98016+
98017+ if (security_mmap_addr(vma->vm_start))
98018+ return -EPERM;
98019+
98020 /*
98021 * The vm_pgoff of a purely anonymous vma should be irrelevant
98022 * until its first write fault, when page's anon_vma and index
98023@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98024 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98025 return -ENOMEM;
98026
98027+#ifdef CONFIG_PAX_SEGMEXEC
98028+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98029+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98030+ if (!vma_m)
98031+ return -ENOMEM;
98032+ }
98033+#endif
98034+
98035 vma_link(mm, vma, prev, rb_link, rb_parent);
98036+
98037+#ifdef CONFIG_PAX_SEGMEXEC
98038+ if (vma_m)
98039+ BUG_ON(pax_mirror_vma(vma_m, vma));
98040+#endif
98041+
98042 return 0;
98043 }
98044
98045@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98046 struct rb_node **rb_link, *rb_parent;
98047 bool faulted_in_anon_vma = true;
98048
98049+ BUG_ON(vma->vm_mirror);
98050+
98051 /*
98052 * If anonymous vma has not yet been faulted, update new pgoff
98053 * to match new location, to increase its chance of merging.
98054@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98055 return NULL;
98056 }
98057
98058+#ifdef CONFIG_PAX_SEGMEXEC
98059+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98060+{
98061+ struct vm_area_struct *prev_m;
98062+ struct rb_node **rb_link_m, *rb_parent_m;
98063+ struct mempolicy *pol_m;
98064+
98065+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98066+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98067+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98068+ *vma_m = *vma;
98069+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98070+ if (anon_vma_clone(vma_m, vma))
98071+ return -ENOMEM;
98072+ pol_m = vma_policy(vma_m);
98073+ mpol_get(pol_m);
98074+ set_vma_policy(vma_m, pol_m);
98075+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98076+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98077+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98078+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98079+ if (vma_m->vm_file)
98080+ get_file(vma_m->vm_file);
98081+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98082+ vma_m->vm_ops->open(vma_m);
98083+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98084+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98085+ vma_m->vm_mirror = vma;
98086+ vma->vm_mirror = vma_m;
98087+ return 0;
98088+}
98089+#endif
98090+
98091 /*
98092 * Return true if the calling process may expand its vm space by the passed
98093 * number of pages
98094@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98095
98096 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98097
98098+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98099 if (cur + npages > lim)
98100 return 0;
98101 return 1;
98102@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
98103 vma->vm_start = addr;
98104 vma->vm_end = addr + len;
98105
98106+#ifdef CONFIG_PAX_MPROTECT
98107+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98108+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98109+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98110+ return ERR_PTR(-EPERM);
98111+ if (!(vm_flags & VM_EXEC))
98112+ vm_flags &= ~VM_MAYEXEC;
98113+#else
98114+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98115+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98116+#endif
98117+ else
98118+ vm_flags &= ~VM_MAYWRITE;
98119+ }
98120+#endif
98121+
98122 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98123 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98124
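
The mm/mmap.c hunks above wire the heap-stack gap into both the unmapped-area lookups (check_heap_stack_gap) and stack expansion: growth is refused whenever an accessible neighbouring mapping sits closer than sysctl_heap_stack_gap. A standalone sketch of the expand_downwards() distance test, assuming a hypothetical 64KB gap; all names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define HEAP_STACK_GAP (64UL * 1024)    /* kernel reads sysctl_heap_stack_gap */

/* A stack growing down to 'address' is refused when the previous accessible
 * mapping ends within the configured gap, mirroring expand_downwards(). */
static bool may_grow_down(unsigned long prev_end, unsigned long address)
{
        return address - prev_end >= HEAP_STACK_GAP;
}

int main(void)
{
        unsigned long prev_end = 0x10000000UL;

        printf("grow to %#lx: %s\n", prev_end + 0x8000UL,
               may_grow_down(prev_end, prev_end + 0x8000UL) ? "ok" : "-ENOMEM");
        printf("grow to %#lx: %s\n", prev_end + 0x20000UL,
               may_grow_down(prev_end, prev_end + 0x20000UL) ? "ok" : "-ENOMEM");
        return 0;
}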
98125diff --git a/mm/mprotect.c b/mm/mprotect.c
98126index ace9345..63320dc 100644
98127--- a/mm/mprotect.c
98128+++ b/mm/mprotect.c
98129@@ -24,10 +24,18 @@
98130 #include <linux/migrate.h>
98131 #include <linux/perf_event.h>
98132 #include <linux/ksm.h>
98133+#include <linux/sched/sysctl.h>
98134+
98135+#ifdef CONFIG_PAX_MPROTECT
98136+#include <linux/elf.h>
98137+#include <linux/binfmts.h>
98138+#endif
98139+
98140 #include <asm/uaccess.h>
98141 #include <asm/pgtable.h>
98142 #include <asm/cacheflush.h>
98143 #include <asm/tlbflush.h>
98144+#include <asm/mmu_context.h>
98145
98146 /*
98147 * For a prot_numa update we only hold mmap_sem for read so there is a
98148@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98149 return pages;
98150 }
98151
98152+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98153+/* called while holding the mmap semaphore for writing, except during stack expansion */
98154+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98155+{
98156+ unsigned long oldlimit, newlimit = 0UL;
98157+
98158+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98159+ return;
98160+
98161+ spin_lock(&mm->page_table_lock);
98162+ oldlimit = mm->context.user_cs_limit;
98163+ if ((prot & VM_EXEC) && oldlimit < end)
98164+ /* USER_CS limit moved up */
98165+ newlimit = end;
98166+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98167+ /* USER_CS limit moved down */
98168+ newlimit = start;
98169+
98170+ if (newlimit) {
98171+ mm->context.user_cs_limit = newlimit;
98172+
98173+#ifdef CONFIG_SMP
98174+ wmb();
98175+ cpus_clear(mm->context.cpu_user_cs_mask);
98176+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98177+#endif
98178+
98179+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98180+ }
98181+ spin_unlock(&mm->page_table_lock);
98182+ if (newlimit == end) {
98183+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98184+
98185+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98186+ if (is_vm_hugetlb_page(vma))
98187+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98188+ else
98189+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98190+ }
98191+}
98192+#endif
98193+
98194 int
98195 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98196 unsigned long start, unsigned long end, unsigned long newflags)
98197@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98198 int error;
98199 int dirty_accountable = 0;
98200
98201+#ifdef CONFIG_PAX_SEGMEXEC
98202+ struct vm_area_struct *vma_m = NULL;
98203+ unsigned long start_m, end_m;
98204+
98205+ start_m = start + SEGMEXEC_TASK_SIZE;
98206+ end_m = end + SEGMEXEC_TASK_SIZE;
98207+#endif
98208+
98209 if (newflags == oldflags) {
98210 *pprev = vma;
98211 return 0;
98212 }
98213
98214+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98215+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98216+
98217+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98218+ return -ENOMEM;
98219+
98220+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98221+ return -ENOMEM;
98222+ }
98223+
98224 /*
98225 * If we make a private mapping writable we increase our commit;
98226 * but (without finer accounting) cannot reduce our commit if we
98227@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98228 }
98229 }
98230
98231+#ifdef CONFIG_PAX_SEGMEXEC
98232+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98233+ if (start != vma->vm_start) {
98234+ error = split_vma(mm, vma, start, 1);
98235+ if (error)
98236+ goto fail;
98237+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98238+ *pprev = (*pprev)->vm_next;
98239+ }
98240+
98241+ if (end != vma->vm_end) {
98242+ error = split_vma(mm, vma, end, 0);
98243+ if (error)
98244+ goto fail;
98245+ }
98246+
98247+ if (pax_find_mirror_vma(vma)) {
98248+ error = __do_munmap(mm, start_m, end_m - start_m);
98249+ if (error)
98250+ goto fail;
98251+ } else {
98252+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98253+ if (!vma_m) {
98254+ error = -ENOMEM;
98255+ goto fail;
98256+ }
98257+ vma->vm_flags = newflags;
98258+ error = pax_mirror_vma(vma_m, vma);
98259+ if (error) {
98260+ vma->vm_flags = oldflags;
98261+ goto fail;
98262+ }
98263+ }
98264+ }
98265+#endif
98266+
98267 /*
98268 * First try to merge with previous and/or next vma.
98269 */
98270@@ -314,7 +418,19 @@ success:
98271 * vm_flags and vm_page_prot are protected by the mmap_sem
98272 * held in write mode.
98273 */
98274+
98275+#ifdef CONFIG_PAX_SEGMEXEC
98276+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98277+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98278+#endif
98279+
98280 vma->vm_flags = newflags;
98281+
98282+#ifdef CONFIG_PAX_MPROTECT
98283+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98284+ mm->binfmt->handle_mprotect(vma, newflags);
98285+#endif
98286+
98287 dirty_accountable = vma_wants_writenotify(vma);
98288 vma_set_page_prot(vma);
98289
98290@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98291 end = start + len;
98292 if (end <= start)
98293 return -ENOMEM;
98294+
98295+#ifdef CONFIG_PAX_SEGMEXEC
98296+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98297+ if (end > SEGMEXEC_TASK_SIZE)
98298+ return -EINVAL;
98299+ } else
98300+#endif
98301+
98302+ if (end > TASK_SIZE)
98303+ return -EINVAL;
98304+
98305 if (!arch_validate_prot(prot))
98306 return -EINVAL;
98307
98308@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98309 /*
98310 * Does the application expect PROT_READ to imply PROT_EXEC:
98311 */
98312- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98313+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98314 prot |= PROT_EXEC;
98315
98316 vm_flags = calc_vm_prot_bits(prot);
98317@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98318 if (start > vma->vm_start)
98319 prev = vma;
98320
98321+#ifdef CONFIG_PAX_MPROTECT
98322+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98323+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98324+#endif
98325+
98326 for (nstart = start ; ; ) {
98327 unsigned long newflags;
98328
98329@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98330
98331 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98332 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98333+ if (prot & (PROT_WRITE | PROT_EXEC))
98334+ gr_log_rwxmprotect(vma);
98335+
98336+ error = -EACCES;
98337+ goto out;
98338+ }
98339+
98340+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98341 error = -EACCES;
98342 goto out;
98343 }
98344@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98345 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98346 if (error)
98347 goto out;
98348+
98349+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98350+
98351 nstart = tmp;
98352
98353 if (nstart < prev->vm_end)
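
track_exec_limit() above implements the PAGEEXEC segment-limit tracking for non-NX x86: the USER_CS limit is raised when an executable range extends past it and lowered when exec rights are removed across it. A minimal userspace restatement of just that limit rule, with names local to the sketch:

#include <stdio.h>

#define VM_EXEC 0x4UL

static unsigned long new_exec_limit(unsigned long oldlimit, unsigned long start,
                                    unsigned long end, unsigned long prot)
{
        if ((prot & VM_EXEC) && oldlimit < end)
                return end;          /* USER_CS limit moved up */
        if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
                return start;        /* USER_CS limit moved down */
        return oldlimit;             /* unchanged */
}

int main(void)
{
        unsigned long limit = 0x08000000UL;

        limit = new_exec_limit(limit, 0x0a000000UL, 0x0a004000UL, VM_EXEC);
        printf("after mprotect(PROT_EXEC): %#lx\n", limit);
        limit = new_exec_limit(limit, 0x09000000UL, 0x0b000000UL, 0);
        printf("after exec removal:        %#lx\n", limit);
        return 0;
}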
98354diff --git a/mm/mremap.c b/mm/mremap.c
98355index 17fa018..6f7892b 100644
98356--- a/mm/mremap.c
98357+++ b/mm/mremap.c
98358@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98359 continue;
98360 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98361 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98362+
98363+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98364+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98365+ pte = pte_exprotect(pte);
98366+#endif
98367+
98368 pte = move_soft_dirty_pte(pte);
98369 set_pte_at(mm, new_addr, new_pte, pte);
98370 }
98371@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98372 if (is_vm_hugetlb_page(vma))
98373 goto Einval;
98374
98375+#ifdef CONFIG_PAX_SEGMEXEC
98376+ if (pax_find_mirror_vma(vma))
98377+ goto Einval;
98378+#endif
98379+
98380 /* We can't remap across vm area boundaries */
98381 if (old_len > vma->vm_end - addr)
98382 goto Efault;
98383@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98384 unsigned long ret = -EINVAL;
98385 unsigned long charged = 0;
98386 unsigned long map_flags;
98387+ unsigned long pax_task_size = TASK_SIZE;
98388
98389 if (new_addr & ~PAGE_MASK)
98390 goto out;
98391
98392- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98393+#ifdef CONFIG_PAX_SEGMEXEC
98394+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98395+ pax_task_size = SEGMEXEC_TASK_SIZE;
98396+#endif
98397+
98398+ pax_task_size -= PAGE_SIZE;
98399+
98400+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98401 goto out;
98402
98403 /* Check if the location we're moving into overlaps the
98404 * old location at all, and fail if it does.
98405 */
98406- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98407- goto out;
98408-
98409- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98410+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98411 goto out;
98412
98413 ret = do_munmap(mm, new_addr, new_len);
98414@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98415 unsigned long ret = -EINVAL;
98416 unsigned long charged = 0;
98417 bool locked = false;
98418+ unsigned long pax_task_size = TASK_SIZE;
98419
98420 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98421 return ret;
98422@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98423 if (!new_len)
98424 return ret;
98425
98426+#ifdef CONFIG_PAX_SEGMEXEC
98427+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98428+ pax_task_size = SEGMEXEC_TASK_SIZE;
98429+#endif
98430+
98431+ pax_task_size -= PAGE_SIZE;
98432+
98433+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98434+ old_len > pax_task_size || addr > pax_task_size-old_len)
98435+ return ret;
98436+
98437 down_write(&current->mm->mmap_sem);
98438
98439 if (flags & MREMAP_FIXED) {
98440@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98441 new_addr = addr;
98442 }
98443 ret = addr;
98444+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98445 goto out;
98446 }
98447 }
98448@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98449 goto out;
98450 }
98451
98452+ map_flags = vma->vm_flags;
98453 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98454+ if (!(ret & ~PAGE_MASK)) {
98455+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98456+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98457+ }
98458 }
98459 out:
98460 if (ret & ~PAGE_MASK)
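
The mremap_to() change above replaces two one-sided interval tests with a single symmetric overlap check. Since both lengths are non-zero at that point, the two forms agree; a brute-force check over small ranges:

#include <assert.h>
#include <stdbool.h>

static bool overlaps_old(unsigned long addr, unsigned long old_len,
                         unsigned long new_addr, unsigned long new_len)
{
        return (new_addr <= addr && new_addr + new_len > addr) ||
               (addr <= new_addr && addr + old_len > new_addr);
}

static bool overlaps_new(unsigned long addr, unsigned long old_len,
                         unsigned long new_addr, unsigned long new_len)
{
        return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
        /* lengths kept non-zero, as guaranteed by the mremap() entry checks */
        for (unsigned long a = 0; a < 8; a++)
                for (unsigned long ol = 1; ol < 8; ol++)
                        for (unsigned long n = 0; n < 8; n++)
                                for (unsigned long nl = 1; nl < 8; nl++)
                                        assert(overlaps_old(a, ol, n, nl) ==
                                               overlaps_new(a, ol, n, nl));
        return 0;
}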
98461diff --git a/mm/nommu.c b/mm/nommu.c
98462index ae5baae..cbb2ed5 100644
98463--- a/mm/nommu.c
98464+++ b/mm/nommu.c
98465@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98466 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98467 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98468 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98469-int heap_stack_gap = 0;
98470
98471 atomic_long_t mmap_pages_allocated;
98472
98473@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98474 EXPORT_SYMBOL(find_vma);
98475
98476 /*
98477- * find a VMA
98478- * - we don't extend stack VMAs under NOMMU conditions
98479- */
98480-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98481-{
98482- return find_vma(mm, addr);
98483-}
98484-
98485-/*
98486 * expand a stack to a given address
98487 * - not supported under NOMMU conditions
98488 */
98489@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98490
98491 /* most fields are the same, copy all, and then fixup */
98492 *new = *vma;
98493+ INIT_LIST_HEAD(&new->anon_vma_chain);
98494 *region = *vma->vm_region;
98495 new->vm_region = region;
98496
98497@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98498 }
98499 EXPORT_SYMBOL(generic_file_remap_pages);
98500
98501-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98502- unsigned long addr, void *buf, int len, int write)
98503+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98504+ unsigned long addr, void *buf, size_t len, int write)
98505 {
98506 struct vm_area_struct *vma;
98507
98508@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98509 *
98510 * The caller must hold a reference on @mm.
98511 */
98512-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98513- void *buf, int len, int write)
98514+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98515+ void *buf, size_t len, int write)
98516 {
98517 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98518 }
98519@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98520 * Access another process' address space.
98521 * - source/target buffer must be kernel space
98522 */
98523-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98524+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98525 {
98526 struct mm_struct *mm;
98527
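
The nommu prototypes above widen the remote-access length from int to size_t/ssize_t, so a large request can no longer truncate or go negative on the way in. A sketch of the truncation the narrower type allowed, assuming a 64-bit size_t:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t len = (size_t)1 << 32;   /* 4GB request; assumes 64-bit size_t */
        int narrow = (int)len;          /* what the old 'int len' parameter saw */

        printf("requested %zu bytes, int sees %d\n", len, narrow);
        return 0;
}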
98528diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98529index 6f43352..e44bf41 100644
98530--- a/mm/page-writeback.c
98531+++ b/mm/page-writeback.c
98532@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98533 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98534 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98535 */
98536-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98537+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98538 unsigned long thresh,
98539 unsigned long bg_thresh,
98540 unsigned long dirty,
98541diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98542index 8bbef06..a8d1989 100644
98543--- a/mm/page_alloc.c
98544+++ b/mm/page_alloc.c
98545@@ -60,6 +60,7 @@
98546 #include <linux/hugetlb.h>
98547 #include <linux/sched/rt.h>
98548 #include <linux/page_owner.h>
98549+#include <linux/random.h>
98550
98551 #include <asm/sections.h>
98552 #include <asm/tlbflush.h>
98553@@ -358,7 +359,7 @@ out:
98554 * This usage means that zero-order pages may not be compound.
98555 */
98556
98557-static void free_compound_page(struct page *page)
98558+void free_compound_page(struct page *page)
98559 {
98560 __free_pages_ok(page, compound_order(page));
98561 }
98562@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98563 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98564 }
98565 #else
98566-struct page_ext_operations debug_guardpage_ops = { NULL, };
98567+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98568 static inline void set_page_guard(struct zone *zone, struct page *page,
98569 unsigned int order, int migratetype) {}
98570 static inline void clear_page_guard(struct zone *zone, struct page *page,
98571@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98572 int i;
98573 int bad = 0;
98574
98575+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98576+ unsigned long index = 1UL << order;
98577+#endif
98578+
98579 VM_BUG_ON_PAGE(PageTail(page), page);
98580 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98581
98582@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98583 debug_check_no_obj_freed(page_address(page),
98584 PAGE_SIZE << order);
98585 }
98586+
98587+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98588+ for (; index; --index)
98589+ sanitize_highpage(page + index - 1);
98590+#endif
98591+
98592 arch_free_page(page, order);
98593 kernel_map_pages(page, 1 << order, 0);
98594
98595@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98596 local_irq_restore(flags);
98597 }
98598
98599+#ifdef CONFIG_PAX_LATENT_ENTROPY
98600+bool __meminitdata extra_latent_entropy;
98601+
98602+static int __init setup_pax_extra_latent_entropy(char *str)
98603+{
98604+ extra_latent_entropy = true;
98605+ return 0;
98606+}
98607+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98608+
98609+volatile u64 latent_entropy __latent_entropy;
98610+EXPORT_SYMBOL(latent_entropy);
98611+#endif
98612+
98613 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98614 {
98615 unsigned int nr_pages = 1 << order;
98616@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98617 __ClearPageReserved(p);
98618 set_page_count(p, 0);
98619
98620+#ifdef CONFIG_PAX_LATENT_ENTROPY
98621+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98622+ u64 hash = 0;
98623+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98624+ const u64 *data = lowmem_page_address(page);
98625+
98626+ for (index = 0; index < end; index++)
98627+ hash ^= hash + data[index];
98628+ latent_entropy ^= hash;
98629+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98630+ }
98631+#endif
98632+
98633 page_zone(page)->managed_pages += nr_pages;
98634 set_page_refcounted(page);
98635 __free_pages(page, order);
98636@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98637 arch_alloc_page(page, order);
98638 kernel_map_pages(page, 1 << order, 1);
98639
98640+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98641 if (gfp_flags & __GFP_ZERO)
98642 prep_zero_page(page, order, gfp_flags);
98643+#endif
98644
98645 if (order && (gfp_flags & __GFP_COMP))
98646 prep_compound_page(page, order);
98647@@ -1700,7 +1740,7 @@ again:
98648 }
98649
98650 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98651- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98652+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98653 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98654 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98655
98656@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98657 do {
98658 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98659 high_wmark_pages(zone) - low_wmark_pages(zone) -
98660- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98661+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98662 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98663 } while (zone++ != preferred_zone);
98664 }
98665@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
98666
98667 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98668 high_wmark_pages(zone) - low_wmark_pages(zone) -
98669- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98670+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98671
98672 setup_zone_migrate_reserve(zone);
98673 spin_unlock_irqrestore(&zone->lock, flags);
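
With pax_extra_latent_entropy, each low-memory boot-time page is folded down to 64 bits with hash ^= hash + data[i] and mixed into the pool via add_device_randomness(). The same fold over an ordinary buffer, as a standalone sketch with local names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static uint64_t fold_page(const void *page)
{
        uint64_t hash = 0, word;
        const unsigned char *p = page;
        size_t i, end = PAGE_SIZE / sizeof(word);

        for (i = 0; i < end; i++) {
                memcpy(&word, p + i * sizeof(word), sizeof(word));
                hash ^= hash + word;    /* same fold as the hunk above */
        }
        return hash;
}

int main(void)
{
        unsigned char page[PAGE_SIZE];

        memset(page, 0xa5, sizeof(page));
        printf("folded: %#llx\n", (unsigned long long)fold_page(page));
        return 0;
}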
98674diff --git a/mm/percpu.c b/mm/percpu.c
98675index d39e2f4..de5f4b4 100644
98676--- a/mm/percpu.c
98677+++ b/mm/percpu.c
98678@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98679 static unsigned int pcpu_high_unit_cpu __read_mostly;
98680
98681 /* the address of the first chunk which starts with the kernel static area */
98682-void *pcpu_base_addr __read_mostly;
98683+void *pcpu_base_addr __read_only;
98684 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98685
98686 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98687diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98688index 5077afc..846c9ef 100644
98689--- a/mm/process_vm_access.c
98690+++ b/mm/process_vm_access.c
98691@@ -13,6 +13,7 @@
98692 #include <linux/uio.h>
98693 #include <linux/sched.h>
98694 #include <linux/highmem.h>
98695+#include <linux/security.h>
98696 #include <linux/ptrace.h>
98697 #include <linux/slab.h>
98698 #include <linux/syscalls.h>
98699@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98700 ssize_t iov_len;
98701 size_t total_len = iov_iter_count(iter);
98702
98703+ return -ENOSYS; // PaX: until properly audited
98704+
98705 /*
98706 * Work out how many pages of struct pages we're going to need
98707 * when eventually calling get_user_pages
98708 */
98709 for (i = 0; i < riovcnt; i++) {
98710 iov_len = rvec[i].iov_len;
98711- if (iov_len > 0) {
98712- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98713- + iov_len)
98714- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98715- / PAGE_SIZE + 1;
98716- nr_pages = max(nr_pages, nr_pages_iov);
98717- }
98718+ if (iov_len <= 0)
98719+ continue;
98720+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98721+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98722+ nr_pages = max(nr_pages, nr_pages_iov);
98723 }
98724
98725 if (nr_pages == 0)
98726@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98727 goto free_proc_pages;
98728 }
98729
98730+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98731+ rc = -EPERM;
98732+ goto put_task_struct;
98733+ }
98734+
98735 mm = mm_access(task, PTRACE_MODE_ATTACH);
98736 if (!mm || IS_ERR(mm)) {
98737 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
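
The restructured loop above preserves the original per-iovec page bound: last page index minus first page index, plus one, which deliberately over-counts when the range ends exactly on a page boundary. The arithmetic in isolation, with PAGE_SIZE fixed at 4096 for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
        return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
        /* 100 bytes straddling a page boundary span two pages */
        printf("%lu\n", pages_spanned(4090, 100));     /* prints 2 */
        /* prints 2: conservative when the end lands exactly on a boundary */
        printf("%lu\n", pages_spanned(8192, 4096));
        return 0;
}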
98738diff --git a/mm/rmap.c b/mm/rmap.c
98739index 71cd5bd..e259089 100644
98740--- a/mm/rmap.c
98741+++ b/mm/rmap.c
98742@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98743 struct anon_vma *anon_vma = vma->anon_vma;
98744 struct anon_vma_chain *avc;
98745
98746+#ifdef CONFIG_PAX_SEGMEXEC
98747+ struct anon_vma_chain *avc_m = NULL;
98748+#endif
98749+
98750 might_sleep();
98751 if (unlikely(!anon_vma)) {
98752 struct mm_struct *mm = vma->vm_mm;
98753@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98754 if (!avc)
98755 goto out_enomem;
98756
98757+#ifdef CONFIG_PAX_SEGMEXEC
98758+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98759+ if (!avc_m)
98760+ goto out_enomem_free_avc;
98761+#endif
98762+
98763 anon_vma = find_mergeable_anon_vma(vma);
98764 allocated = NULL;
98765 if (!anon_vma) {
98766@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98767 /* page_table_lock to protect against threads */
98768 spin_lock(&mm->page_table_lock);
98769 if (likely(!vma->anon_vma)) {
98770+
98771+#ifdef CONFIG_PAX_SEGMEXEC
98772+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98773+
98774+ if (vma_m) {
98775+ BUG_ON(vma_m->anon_vma);
98776+ vma_m->anon_vma = anon_vma;
98777+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98778+ anon_vma->degree++;
98779+ avc_m = NULL;
98780+ }
98781+#endif
98782+
98783 vma->anon_vma = anon_vma;
98784 anon_vma_chain_link(vma, avc, anon_vma);
98785 /* vma reference or self-parent link for new root */
98786@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98787
98788 if (unlikely(allocated))
98789 put_anon_vma(allocated);
98790+
98791+#ifdef CONFIG_PAX_SEGMEXEC
98792+ if (unlikely(avc_m))
98793+ anon_vma_chain_free(avc_m);
98794+#endif
98795+
98796 if (unlikely(avc))
98797 anon_vma_chain_free(avc);
98798 }
98799 return 0;
98800
98801 out_enomem_free_avc:
98802+
98803+#ifdef CONFIG_PAX_SEGMEXEC
98804+ if (avc_m)
98805+ anon_vma_chain_free(avc_m);
98806+#endif
98807+
98808 anon_vma_chain_free(avc);
98809 out_enomem:
98810 return -ENOMEM;
98811@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98812 * good chance of avoiding scanning the whole hierarchy when it searches where
98813 * page is mapped.
98814 */
98815-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98816+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98817 {
98818 struct anon_vma_chain *avc, *pavc;
98819 struct anon_vma *root = NULL;
98820@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98821 * the corresponding VMA in the parent process is attached to.
98822 * Returns 0 on success, non-zero on failure.
98823 */
98824-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98825+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98826 {
98827 struct anon_vma_chain *avc;
98828 struct anon_vma *anon_vma;
98829@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
98830 void __init anon_vma_init(void)
98831 {
98832 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98833- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98834- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98835+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98836+ anon_vma_ctor);
98837+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98838+ SLAB_PANIC|SLAB_NO_SANITIZE);
98839 }
98840
98841 /*
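
The anon_vma_prepare() hunks follow the usual allocate-outside-the-lock pattern: avc_m is allocated with GFP_KERNEL before page_table_lock is taken, linked under the lock if the SEGMEXEC mirror still needs it, and freed afterwards if it went unused. A generic userspace sketch of that pattern, with a pthread mutex standing in for the spinlock and names local to the sketch (compile with -pthread):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_slot;

static int install_once(void)
{
        void *fresh = malloc(64);       /* may sleep: done before locking */

        if (!fresh)
                return -1;
        pthread_mutex_lock(&lock);
        if (!shared_slot) {             /* still needed? claim it */
                shared_slot = fresh;
                fresh = NULL;
        }
        pthread_mutex_unlock(&lock);
        free(fresh);                    /* raced and lost: discard */
        return 0;
}

int main(void)
{
        return install_once();
}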
98842diff --git a/mm/shmem.c b/mm/shmem.c
98843index 993e6ba..a962ba3 100644
98844--- a/mm/shmem.c
98845+++ b/mm/shmem.c
98846@@ -33,7 +33,7 @@
98847 #include <linux/swap.h>
98848 #include <linux/aio.h>
98849
98850-static struct vfsmount *shm_mnt;
98851+struct vfsmount *shm_mnt;
98852
98853 #ifdef CONFIG_SHMEM
98854 /*
98855@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98856 #define BOGO_DIRENT_SIZE 20
98857
98858 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98859-#define SHORT_SYMLINK_LEN 128
98860+#define SHORT_SYMLINK_LEN 64
98861
98862 /*
98863 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98864@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98865 static int shmem_xattr_validate(const char *name)
98866 {
98867 struct { const char *prefix; size_t len; } arr[] = {
98868+
98869+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98870+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98871+#endif
98872+
98873 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98874 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98875 };
98876@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98877 if (err)
98878 return err;
98879
98880+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98881+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98882+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98883+ return -EOPNOTSUPP;
98884+ if (size > 8)
98885+ return -EINVAL;
98886+ }
98887+#endif
98888+
98889 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98890 }
98891
98892@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98893 int err = -ENOMEM;
98894
98895 /* Round up to L1_CACHE_BYTES to resist false sharing */
98896- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98897- L1_CACHE_BYTES), GFP_KERNEL);
98898+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98899 if (!sbinfo)
98900 return -ENOMEM;
98901
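
With CONFIG_PAX_XATTR_PAX_FLAGS, tmpfs accepts exactly one user.* attribute, the PaX flags marking, with the value capped at 8 bytes. A hedged usage sketch; the path and the flag string "em" are illustrative only:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
        const char value[] = "em";      /* example marking, not prescriptive */

        if (setxattr("/tmp/some-binary", "user.pax.flags",
                     value, sizeof(value) - 1, 0) != 0)
                perror("setxattr");     /* any other user.* name gets -EOPNOTSUPP */
        return 0;
}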
98902diff --git a/mm/slab.c b/mm/slab.c
98903index 65b5dcb..d53d866 100644
98904--- a/mm/slab.c
98905+++ b/mm/slab.c
98906@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98907 if ((x)->max_freeable < i) \
98908 (x)->max_freeable = i; \
98909 } while (0)
98910-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98911-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98912-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98913-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98914+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98915+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98916+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98917+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98918+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98919+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98920 #else
98921 #define STATS_INC_ACTIVE(x) do { } while (0)
98922 #define STATS_DEC_ACTIVE(x) do { } while (0)
98923@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98924 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98925 #define STATS_INC_FREEHIT(x) do { } while (0)
98926 #define STATS_INC_FREEMISS(x) do { } while (0)
98927+#define STATS_INC_SANITIZED(x) do { } while (0)
98928+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98929 #endif
98930
98931 #if DEBUG
98932@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98933 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98934 */
98935 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98936- const struct page *page, void *obj)
98937+ const struct page *page, const void *obj)
98938 {
98939 u32 offset = (obj - page->s_mem);
98940 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98941@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
98942 * structures first. Without this, further allocations will bug.
98943 */
98944 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
98945- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98946+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98947 slab_state = PARTIAL_NODE;
98948
98949 slab_early_init = 0;
98950@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
98951
98952 cachep = find_mergeable(size, align, flags, name, ctor);
98953 if (cachep) {
98954- cachep->refcount++;
98955+ atomic_inc(&cachep->refcount);
98956
98957 /*
98958 * Adjust the object sizes so that we clear
98959@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98960 struct array_cache *ac = cpu_cache_get(cachep);
98961
98962 check_irq_off();
98963+
98964+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98965+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98966+ STATS_INC_NOT_SANITIZED(cachep);
98967+ else {
98968+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98969+
98970+ if (cachep->ctor)
98971+ cachep->ctor(objp);
98972+
98973+ STATS_INC_SANITIZED(cachep);
98974+ }
98975+#endif
98976+
98977 kmemleak_free_recursive(objp, cachep->flags);
98978 objp = cache_free_debugcheck(cachep, objp, caller);
98979
98980@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
98981 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
98982 }
98983
98984-void *__kmalloc_node(size_t size, gfp_t flags, int node)
98985+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
98986 {
98987 return __do_kmalloc_node(size, flags, node, _RET_IP_);
98988 }
98989@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
98990 * @flags: the type of memory to allocate (see kmalloc).
98991 * @caller: function caller for debug tracking of the caller
98992 */
98993-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
98994+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
98995 unsigned long caller)
98996 {
98997 struct kmem_cache *cachep;
98998@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
98999
99000 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99001 return;
99002+ VM_BUG_ON(!virt_addr_valid(objp));
99003 local_irq_save(flags);
99004 kfree_debugcheck(objp);
99005 c = virt_to_cache(objp);
99006@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99007 }
99008 /* cpu stats */
99009 {
99010- unsigned long allochit = atomic_read(&cachep->allochit);
99011- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99012- unsigned long freehit = atomic_read(&cachep->freehit);
99013- unsigned long freemiss = atomic_read(&cachep->freemiss);
99014+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99015+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99016+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99017+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99018
99019 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99020 allochit, allocmiss, freehit, freemiss);
99021 }
99022+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99023+ {
99024+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99025+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99026+
99027+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99028+ }
99029+#endif
99030 #endif
99031 }
99032
99033@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
99034 static int __init slab_proc_init(void)
99035 {
99036 #ifdef CONFIG_DEBUG_SLAB_LEAK
99037- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99038+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99039 #endif
99040 return 0;
99041 }
99042 module_init(slab_proc_init);
99043 #endif
99044
99045+bool is_usercopy_object(const void *ptr)
99046+{
99047+ struct page *page;
99048+ struct kmem_cache *cachep;
99049+
99050+ if (ZERO_OR_NULL_PTR(ptr))
99051+ return false;
99052+
99053+ if (!slab_is_available())
99054+ return false;
99055+
99056+ if (!virt_addr_valid(ptr))
99057+ return false;
99058+
99059+ page = virt_to_head_page(ptr);
99060+
99061+ if (!PageSlab(page))
99062+ return false;
99063+
99064+ cachep = page->slab_cache;
99065+ return cachep->flags & SLAB_USERCOPY;
99066+}
99067+
99068+#ifdef CONFIG_PAX_USERCOPY
99069+const char *check_heap_object(const void *ptr, unsigned long n)
99070+{
99071+ struct page *page;
99072+ struct kmem_cache *cachep;
99073+ unsigned int objnr;
99074+ unsigned long offset;
99075+
99076+ if (ZERO_OR_NULL_PTR(ptr))
99077+ return "<null>";
99078+
99079+ if (!virt_addr_valid(ptr))
99080+ return NULL;
99081+
99082+ page = virt_to_head_page(ptr);
99083+
99084+ if (!PageSlab(page))
99085+ return NULL;
99086+
99087+ cachep = page->slab_cache;
99088+ if (!(cachep->flags & SLAB_USERCOPY))
99089+ return cachep->name;
99090+
99091+ objnr = obj_to_index(cachep, page, ptr);
99092+ BUG_ON(objnr >= cachep->num);
99093+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99094+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99095+ return NULL;
99096+
99097+ return cachep->name;
99098+}
99099+#endif
99100+
99101 /**
99102 * ksize - get the actual amount of memory allocated for a given object
99103 * @objp: Pointer to the object
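
check_heap_object() above accepts a copy only if [ptr, ptr+n) stays inside a single SLAB_USERCOPY object. Writing the bound as two subtractions avoids integer overflow for huge n; the test in isolation, with illustrative values:

#include <stdbool.h>
#include <stdio.h>

static bool copy_within_object(unsigned long offset, unsigned long n,
                               unsigned long object_size)
{
        return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
        printf("%d\n", copy_within_object(16, 48, 64)); /* 1: exactly fits */
        printf("%d\n", copy_within_object(16, 49, 64)); /* 0: one byte over */
        /* a naive 'offset + n <= object_size' would wrap for huge n */
        printf("%d\n", copy_within_object(16, (unsigned long)-1, 64)); /* 0 */
        return 0;
}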
99104diff --git a/mm/slab.h b/mm/slab.h
99105index 1cf40054..10ad563 100644
99106--- a/mm/slab.h
99107+++ b/mm/slab.h
99108@@ -22,7 +22,7 @@ struct kmem_cache {
99109 unsigned int align; /* Alignment as calculated */
99110 unsigned long flags; /* Active flags on the slab */
99111 const char *name; /* Slab name for sysfs */
99112- int refcount; /* Use counter */
99113+ atomic_t refcount; /* Use counter */
99114 void (*ctor)(void *); /* Called on object slot creation */
99115 struct list_head list; /* List of all slab caches on the system */
99116 };
99117@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99118 /* The slab cache that manages slab cache information */
99119 extern struct kmem_cache *kmem_cache;
99120
99121+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99122+#ifdef CONFIG_X86_64
99123+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99124+#else
99125+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99126+#endif
99127+enum pax_sanitize_mode {
99128+ PAX_SANITIZE_SLAB_OFF = 0,
99129+ PAX_SANITIZE_SLAB_FAST,
99130+ PAX_SANITIZE_SLAB_FULL,
99131+};
99132+extern enum pax_sanitize_mode pax_sanitize_slab;
99133+#endif
99134+
99135 unsigned long calculate_alignment(unsigned long flags,
99136 unsigned long align, unsigned long size);
99137
99138@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99139
99140 /* Legal flag mask for kmem_cache_create(), for various configurations */
99141 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99142- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99143+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99144+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99145
99146 #if defined(CONFIG_DEBUG_SLAB)
99147 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99148@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99149 return s;
99150
99151 page = virt_to_head_page(x);
99152+
99153+ BUG_ON(!PageSlab(page));
99154+
99155 cachep = page->slab_cache;
99156 if (slab_equal_or_root(cachep, s))
99157 return cachep;
99158diff --git a/mm/slab_common.c b/mm/slab_common.c
99159index e03dd6f..c475838 100644
99160--- a/mm/slab_common.c
99161+++ b/mm/slab_common.c
99162@@ -25,11 +25,35 @@
99163
99164 #include "slab.h"
99165
99166-enum slab_state slab_state;
99167+enum slab_state slab_state __read_only;
99168 LIST_HEAD(slab_caches);
99169 DEFINE_MUTEX(slab_mutex);
99170 struct kmem_cache *kmem_cache;
99171
99172+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99173+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99174+static int __init pax_sanitize_slab_setup(char *str)
99175+{
99176+ if (!str)
99177+ return 0;
99178+
99179+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99180+ pr_info("PaX slab sanitization: %s\n", "disabled");
99181+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99182+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99183+ pr_info("PaX slab sanitization: %s\n", "fast");
99184+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99185+ } else if (!strcmp(str, "full")) {
99186+ pr_info("PaX slab sanitization: %s\n", "full");
99187+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99188+ } else
99189+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99190+
99191+ return 0;
99192+}
99193+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99194+#endif
99195+
99196 /*
99197 * Set of flags that will prevent slab merging
99198 */
99199@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99200 * Merge control. If this is set then no merging of slab caches will occur.
99201 * (Could be removed. This was introduced to pacify the merge skeptics.)
99202 */
99203-static int slab_nomerge;
99204+static int slab_nomerge = 1;
99205
99206 static int __init setup_slab_nomerge(char *str)
99207 {
99208@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99209 /*
99210 * We may have set a slab to be unmergeable during bootstrap.
99211 */
99212- if (s->refcount < 0)
99213+ if (atomic_read(&s->refcount) < 0)
99214 return 1;
99215
99216 return 0;
99217@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99218 if (err)
99219 goto out_free_cache;
99220
99221- s->refcount = 1;
99222+ atomic_set(&s->refcount, 1);
99223 list_add(&s->list, &slab_caches);
99224 out:
99225 if (err)
99226@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99227 */
99228 flags &= CACHE_CREATE_MASK;
99229
99230+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99231+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99232+ flags |= SLAB_NO_SANITIZE;
99233+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99234+ flags &= ~SLAB_NO_SANITIZE;
99235+#endif
99236+
99237 s = __kmem_cache_alias(name, size, align, flags, ctor);
99238 if (s)
99239 goto out_unlock;
99240@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99241
99242 mutex_lock(&slab_mutex);
99243
99244- s->refcount--;
99245- if (s->refcount)
99246+ if (!atomic_dec_and_test(&s->refcount))
99247 goto out_unlock;
99248
99249 if (memcg_cleanup_cache_params(s) != 0)
99250@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99251 rcu_barrier();
99252
99253 memcg_free_cache_params(s);
99254-#ifdef SLAB_SUPPORTS_SYSFS
99255+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99256 sysfs_slab_remove(s);
99257 #else
99258 slab_kmem_cache_release(s);
99259@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99260 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99261 name, size, err);
99262
99263- s->refcount = -1; /* Exempt from merging for now */
99264+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99265 }
99266
99267 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99268@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99269
99270 create_boot_cache(s, name, size, flags);
99271 list_add(&s->list, &slab_caches);
99272- s->refcount = 1;
99273+ atomic_set(&s->refcount, 1);
99274 return s;
99275 }
99276
99277@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99278 EXPORT_SYMBOL(kmalloc_dma_caches);
99279 #endif
99280
99281+#ifdef CONFIG_PAX_USERCOPY_SLABS
99282+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99283+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99284+#endif
99285+
99286 /*
99287 * Conversion table for small slabs sizes / 8 to the index in the
99288 * kmalloc array. This is necessary for slabs < 192 since we have non power
99289@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99290 return kmalloc_dma_caches[index];
99291
99292 #endif
99293+
99294+#ifdef CONFIG_PAX_USERCOPY_SLABS
99295+ if (unlikely((flags & GFP_USERCOPY)))
99296+ return kmalloc_usercopy_caches[index];
99297+
99298+#endif
99299+
99300 return kmalloc_caches[index];
99301 }
99302
99303@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99304 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99305 if (!kmalloc_caches[i]) {
99306 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99307- 1 << i, flags);
99308+ 1 << i, SLAB_USERCOPY | flags);
99309 }
99310
99311 /*
99312@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99313 * earlier power of two caches
99314 */
99315 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99316- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99317+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99318
99319 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99320- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99321+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99322 }
99323
99324 /* Kmalloc array is now usable */
99325@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99326 }
99327 }
99328 #endif
99329+
99330+#ifdef CONFIG_PAX_USERCOPY_SLABS
99331+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99332+ struct kmem_cache *s = kmalloc_caches[i];
99333+
99334+ if (s) {
99335+ int size = kmalloc_size(i);
99336+ char *n = kasprintf(GFP_NOWAIT,
99337+ "usercopy-kmalloc-%d", size);
99338+
99339+ BUG_ON(!n);
99340+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99341+ size, SLAB_USERCOPY | flags);
99342+ }
99343+ }
99344+#endif
99345+
99346 }
99347 #endif /* !CONFIG_SLOB */
99348
99349@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99350 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99351 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99352 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99353+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99354+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99355+#endif
99356 #endif
99357 seq_putc(m, '\n');
99358 }
99359@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99360 module_init(slab_proc_init);
99361 #endif /* CONFIG_SLABINFO */
99362
99363-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99364+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99365 gfp_t flags)
99366 {
99367 void *ret;
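
pax_sanitize_slab_setup() above is an ordinary early_param() handler: during early boot it receives the text after "pax_sanitize_slab=" on the kernel command line and maps "0"/"off", "1"/"fast" and "full" onto the pax_sanitize_mode enum, leaving the default (fast) in place otherwise. A user-space sketch of the same string-to-mode mapping; the kernel glue (early_param, pr_info) is assumed rather than reimplemented:

#include <stdio.h>
#include <string.h>

enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

static enum sanitize_mode parse_sanitize(const char *str, enum sanitize_mode cur)
{
	if (!str)
		return cur;
	if (!strcmp(str, "0") || !strcmp(str, "off"))
		return SANITIZE_OFF;
	if (!strcmp(str, "1") || !strcmp(str, "fast"))
		return SANITIZE_FAST;
	if (!strcmp(str, "full"))
		return SANITIZE_FULL;
	fprintf(stderr, "unsupported option '%s'\n", str);
	return cur;	/* unknown strings leave the mode unchanged */
}

int main(void)
{
	printf("%d\n", parse_sanitize("full", SANITIZE_FAST));	/* prints 2 */
	return 0;
}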
99368diff --git a/mm/slob.c b/mm/slob.c
99369index 96a8620..46b3f12 100644
99370--- a/mm/slob.c
99371+++ b/mm/slob.c
99372@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99373 /*
99374 * Return the size of a slob block.
99375 */
99376-static slobidx_t slob_units(slob_t *s)
99377+static slobidx_t slob_units(const slob_t *s)
99378 {
99379 if (s->units > 0)
99380 return s->units;
99381@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99382 /*
99383 * Return the next free slob block pointer after this one.
99384 */
99385-static slob_t *slob_next(slob_t *s)
99386+static slob_t *slob_next(const slob_t *s)
99387 {
99388 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99389 slobidx_t next;
99390@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99391 /*
99392 * Returns true if s is the last free block in its page.
99393 */
99394-static int slob_last(slob_t *s)
99395+static int slob_last(const slob_t *s)
99396 {
99397 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99398 }
99399
99400-static void *slob_new_pages(gfp_t gfp, int order, int node)
99401+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99402 {
99403- void *page;
99404+ struct page *page;
99405
99406 #ifdef CONFIG_NUMA
99407 if (node != NUMA_NO_NODE)
99408@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99409 if (!page)
99410 return NULL;
99411
99412- return page_address(page);
99413+ __SetPageSlab(page);
99414+ return page;
99415 }
99416
99417-static void slob_free_pages(void *b, int order)
99418+static void slob_free_pages(struct page *sp, int order)
99419 {
99420 if (current->reclaim_state)
99421 current->reclaim_state->reclaimed_slab += 1 << order;
99422- free_pages((unsigned long)b, order);
99423+ __ClearPageSlab(sp);
99424+ page_mapcount_reset(sp);
99425+ sp->private = 0;
99426+ __free_pages(sp, order);
99427 }
99428
99429 /*
99430@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99431
99432 /* Not enough space: must allocate a new page */
99433 if (!b) {
99434- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99435- if (!b)
99436+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99437+ if (!sp)
99438 return NULL;
99439- sp = virt_to_page(b);
99440- __SetPageSlab(sp);
99441+ b = page_address(sp);
99442
99443 spin_lock_irqsave(&slob_lock, flags);
99444 sp->units = SLOB_UNITS(PAGE_SIZE);
99445 sp->freelist = b;
99446+ sp->private = 0;
99447 INIT_LIST_HEAD(&sp->lru);
99448 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99449 set_slob_page_free(sp, slob_list);
99450@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99451 /*
99452 * slob_free: entry point into the slob allocator.
99453 */
99454-static void slob_free(void *block, int size)
99455+static void slob_free(struct kmem_cache *c, void *block, int size)
99456 {
99457 struct page *sp;
99458 slob_t *prev, *next, *b = (slob_t *)block;
99459@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99460 if (slob_page_free(sp))
99461 clear_slob_page_free(sp);
99462 spin_unlock_irqrestore(&slob_lock, flags);
99463- __ClearPageSlab(sp);
99464- page_mapcount_reset(sp);
99465- slob_free_pages(b, 0);
99466+ slob_free_pages(sp, 0);
99467 return;
99468 }
99469
99470+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99471+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99472+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99473+#endif
99474+
99475 if (!slob_page_free(sp)) {
99476 /* This slob page is about to become partially free. Easy! */
99477 sp->units = units;
99478@@ -424,11 +431,10 @@ out:
99479 */
99480
99481 static __always_inline void *
99482-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99483+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99484 {
99485- unsigned int *m;
99486- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99487- void *ret;
99488+ slob_t *m;
99489+ void *ret = NULL;
99490
99491 gfp &= gfp_allowed_mask;
99492
99493@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99494
99495 if (!m)
99496 return NULL;
99497- *m = size;
99498+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99499+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99500+ m[0].units = size;
99501+ m[1].units = align;
99502 ret = (void *)m + align;
99503
99504 trace_kmalloc_node(caller, ret,
99505 size, size + align, gfp, node);
99506 } else {
99507 unsigned int order = get_order(size);
99508+ struct page *page;
99509
99510 if (likely(order))
99511 gfp |= __GFP_COMP;
99512- ret = slob_new_pages(gfp, order, node);
99513+ page = slob_new_pages(gfp, order, node);
99514+ if (page) {
99515+ ret = page_address(page);
99516+ page->private = size;
99517+ }
99518
99519 trace_kmalloc_node(caller, ret,
99520 size, PAGE_SIZE << order, gfp, node);
99521 }
99522
99523- kmemleak_alloc(ret, size, 1, gfp);
99524 return ret;
99525 }
99526
99527-void *__kmalloc(size_t size, gfp_t gfp)
99528+static __always_inline void *
99529+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99530+{
99531+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99532+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99533+
99534+ if (!ZERO_OR_NULL_PTR(ret))
99535+ kmemleak_alloc(ret, size, 1, gfp);
99536+ return ret;
99537+}
99538+
99539+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99540 {
99541 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99542 }
99543@@ -491,34 +515,112 @@ void kfree(const void *block)
99544 return;
99545 kmemleak_free(block);
99546
99547+ VM_BUG_ON(!virt_addr_valid(block));
99548 sp = virt_to_page(block);
99549- if (PageSlab(sp)) {
99550+ VM_BUG_ON(!PageSlab(sp));
99551+ if (!sp->private) {
99552 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99553- unsigned int *m = (unsigned int *)(block - align);
99554- slob_free(m, *m + align);
99555- } else
99556+ slob_t *m = (slob_t *)(block - align);
99557+ slob_free(NULL, m, m[0].units + align);
99558+ } else {
99559+ __ClearPageSlab(sp);
99560+ page_mapcount_reset(sp);
99561+ sp->private = 0;
99562 __free_pages(sp, compound_order(sp));
99563+ }
99564 }
99565 EXPORT_SYMBOL(kfree);
99566
99567+bool is_usercopy_object(const void *ptr)
99568+{
99569+ if (!slab_is_available())
99570+ return false;
99571+
99572+ // PAX: TODO
99573+
99574+ return false;
99575+}
99576+
99577+#ifdef CONFIG_PAX_USERCOPY
99578+const char *check_heap_object(const void *ptr, unsigned long n)
99579+{
99580+ struct page *page;
99581+ const slob_t *free;
99582+ const void *base;
99583+ unsigned long flags;
99584+
99585+ if (ZERO_OR_NULL_PTR(ptr))
99586+ return "<null>";
99587+
99588+ if (!virt_addr_valid(ptr))
99589+ return NULL;
99590+
99591+ page = virt_to_head_page(ptr);
99592+ if (!PageSlab(page))
99593+ return NULL;
99594+
99595+ if (page->private) {
99596+ base = page;
99597+ if (base <= ptr && n <= page->private - (ptr - base))
99598+ return NULL;
99599+ return "<slob>";
99600+ }
99601+
99602+ /* some tricky double walking to find the chunk */
99603+ spin_lock_irqsave(&slob_lock, flags);
99604+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99605+ free = page->freelist;
99606+
99607+ while (!slob_last(free) && (void *)free <= ptr) {
99608+ base = free + slob_units(free);
99609+ free = slob_next(free);
99610+ }
99611+
99612+ while (base < (void *)free) {
99613+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99614+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99615+ int offset;
99616+
99617+ if (ptr < base + align)
99618+ break;
99619+
99620+ offset = ptr - base - align;
99621+ if (offset >= m) {
99622+ base += size;
99623+ continue;
99624+ }
99625+
99626+ if (n > m - offset)
99627+ break;
99628+
99629+ spin_unlock_irqrestore(&slob_lock, flags);
99630+ return NULL;
99631+ }
99632+
99633+ spin_unlock_irqrestore(&slob_lock, flags);
99634+ return "<slob>";
99635+}
99636+#endif
99637+
99638 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99639 size_t ksize(const void *block)
99640 {
99641 struct page *sp;
99642 int align;
99643- unsigned int *m;
99644+ slob_t *m;
99645
99646 BUG_ON(!block);
99647 if (unlikely(block == ZERO_SIZE_PTR))
99648 return 0;
99649
99650 sp = virt_to_page(block);
99651- if (unlikely(!PageSlab(sp)))
99652- return PAGE_SIZE << compound_order(sp);
99653+ VM_BUG_ON(!PageSlab(sp));
99654+ if (sp->private)
99655+ return sp->private;
99656
99657 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99658- m = (unsigned int *)(block - align);
99659- return SLOB_UNITS(*m) * SLOB_UNIT;
99660+ m = (slob_t *)(block - align);
99661+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99662 }
99663 EXPORT_SYMBOL(ksize);
99664
99665@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99666
99667 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99668 {
99669- void *b;
99670+ void *b = NULL;
99671
99672 flags &= gfp_allowed_mask;
99673
99674 lockdep_trace_alloc(flags);
99675
99676+#ifdef CONFIG_PAX_USERCOPY_SLABS
99677+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99678+#else
99679 if (c->size < PAGE_SIZE) {
99680 b = slob_alloc(c->size, flags, c->align, node);
99681 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99682 SLOB_UNITS(c->size) * SLOB_UNIT,
99683 flags, node);
99684 } else {
99685- b = slob_new_pages(flags, get_order(c->size), node);
99686+ struct page *sp;
99687+
99688+ sp = slob_new_pages(flags, get_order(c->size), node);
99689+ if (sp) {
99690+ b = page_address(sp);
99691+ sp->private = c->size;
99692+ }
99693 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99694 PAGE_SIZE << get_order(c->size),
99695 flags, node);
99696 }
99697+#endif
99698
99699 if (b && c->ctor)
99700 c->ctor(b);
99701@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99702 EXPORT_SYMBOL(kmem_cache_alloc);
99703
99704 #ifdef CONFIG_NUMA
99705-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99706+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99707 {
99708 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99709 }
99710@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99711 EXPORT_SYMBOL(kmem_cache_alloc_node);
99712 #endif
99713
99714-static void __kmem_cache_free(void *b, int size)
99715+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99716 {
99717- if (size < PAGE_SIZE)
99718- slob_free(b, size);
99719+ struct page *sp;
99720+
99721+ sp = virt_to_page(b);
99722+ BUG_ON(!PageSlab(sp));
99723+ if (!sp->private)
99724+ slob_free(c, b, size);
99725 else
99726- slob_free_pages(b, get_order(size));
99727+ slob_free_pages(sp, get_order(size));
99728 }
99729
99730 static void kmem_rcu_free(struct rcu_head *head)
99731@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99732 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99733 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99734
99735- __kmem_cache_free(b, slob_rcu->size);
99736+ __kmem_cache_free(NULL, b, slob_rcu->size);
99737 }
99738
99739 void kmem_cache_free(struct kmem_cache *c, void *b)
99740 {
99741+ int size = c->size;
99742+
99743+#ifdef CONFIG_PAX_USERCOPY_SLABS
99744+ if (size + c->align < PAGE_SIZE) {
99745+ size += c->align;
99746+ b -= c->align;
99747+ }
99748+#endif
99749+
99750 kmemleak_free_recursive(b, c->flags);
99751 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99752 struct slob_rcu *slob_rcu;
99753- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99754- slob_rcu->size = c->size;
99755+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99756+ slob_rcu->size = size;
99757 call_rcu(&slob_rcu->head, kmem_rcu_free);
99758 } else {
99759- __kmem_cache_free(b, c->size);
99760+ __kmem_cache_free(c, b, size);
99761 }
99762
99763+#ifdef CONFIG_PAX_USERCOPY_SLABS
99764+ trace_kfree(_RET_IP_, b);
99765+#else
99766 trace_kmem_cache_free(_RET_IP_, b);
99767+#endif
99768+
99769 }
99770 EXPORT_SYMBOL(kmem_cache_free);
99771
99772diff --git a/mm/slub.c b/mm/slub.c
99773index fe376fe..2f5757c 100644
99774--- a/mm/slub.c
99775+++ b/mm/slub.c
99776@@ -197,7 +197,7 @@ struct track {
99777
99778 enum track_item { TRACK_ALLOC, TRACK_FREE };
99779
99780-#ifdef CONFIG_SYSFS
99781+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99782 static int sysfs_slab_add(struct kmem_cache *);
99783 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99784 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99785@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99786 if (!t->addr)
99787 return;
99788
99789- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99790+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99791 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99792 #ifdef CONFIG_STACKTRACE
99793 {
99794@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99795
99796 slab_free_hook(s, x);
99797
99798+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99799+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99800+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99801+ if (s->ctor)
99802+ s->ctor(x);
99803+ }
99804+#endif
99805+
99806 redo:
99807 /*
99808 * Determine the currently cpus per cpu slab.
99809@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99810 s->inuse = size;
99811
99812 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99813+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99814+ (!(flags & SLAB_NO_SANITIZE)) ||
99815+#endif
99816 s->ctor)) {
99817 /*
99818 * Relocate free pointer after the object if it is not
99819@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99820
99821 __setup("slub_min_objects=", setup_slub_min_objects);
99822
99823-void *__kmalloc(size_t size, gfp_t flags)
99824+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99825 {
99826 struct kmem_cache *s;
99827 void *ret;
99828@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99829 return ptr;
99830 }
99831
99832-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99833+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99834 {
99835 struct kmem_cache *s;
99836 void *ret;
99837@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99838 EXPORT_SYMBOL(__kmalloc_node);
99839 #endif
99840
99841+bool is_usercopy_object(const void *ptr)
99842+{
99843+ struct page *page;
99844+ struct kmem_cache *s;
99845+
99846+ if (ZERO_OR_NULL_PTR(ptr))
99847+ return false;
99848+
99849+ if (!slab_is_available())
99850+ return false;
99851+
99852+ if (!virt_addr_valid(ptr))
99853+ return false;
99854+
99855+ page = virt_to_head_page(ptr);
99856+
99857+ if (!PageSlab(page))
99858+ return false;
99859+
99860+ s = page->slab_cache;
99861+ return s->flags & SLAB_USERCOPY;
99862+}
99863+
99864+#ifdef CONFIG_PAX_USERCOPY
99865+const char *check_heap_object(const void *ptr, unsigned long n)
99866+{
99867+ struct page *page;
99868+ struct kmem_cache *s;
99869+ unsigned long offset;
99870+
99871+ if (ZERO_OR_NULL_PTR(ptr))
99872+ return "<null>";
99873+
99874+ if (!virt_addr_valid(ptr))
99875+ return NULL;
99876+
99877+ page = virt_to_head_page(ptr);
99878+
99879+ if (!PageSlab(page))
99880+ return NULL;
99881+
99882+ s = page->slab_cache;
99883+ if (!(s->flags & SLAB_USERCOPY))
99884+ return s->name;
99885+
99886+ offset = (ptr - page_address(page)) % s->size;
99887+ if (offset <= s->object_size && n <= s->object_size - offset)
99888+ return NULL;
99889+
99890+ return s->name;
99891+}
99892+#endif
99893+
99894 size_t ksize(const void *object)
99895 {
99896 struct page *page;
99897@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99898 if (unlikely(ZERO_OR_NULL_PTR(x)))
99899 return;
99900
99901+ VM_BUG_ON(!virt_addr_valid(x));
99902 page = virt_to_head_page(x);
99903 if (unlikely(!PageSlab(page))) {
99904 BUG_ON(!PageCompound(page));
99905@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99906 int i;
99907 struct kmem_cache *c;
99908
99909- s->refcount++;
99910+ atomic_inc(&s->refcount);
99911
99912 /*
99913 * Adjust the object sizes so that we clear
99914@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99915 }
99916
99917 if (sysfs_slab_alias(s, name)) {
99918- s->refcount--;
99919+ atomic_dec(&s->refcount);
99920 s = NULL;
99921 }
99922 }
99923@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99924 }
99925 #endif
99926
99927-#ifdef CONFIG_SYSFS
99928+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99929 static int count_inuse(struct page *page)
99930 {
99931 return page->inuse;
99932@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99933 len += sprintf(buf + len, "%7ld ", l->count);
99934
99935 if (l->addr)
99936+#ifdef CONFIG_GRKERNSEC_HIDESYM
99937+ len += sprintf(buf + len, "%pS", NULL);
99938+#else
99939 len += sprintf(buf + len, "%pS", (void *)l->addr);
99940+#endif
99941 else
99942 len += sprintf(buf + len, "<not-available>");
99943
99944@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
99945 validate_slab_cache(kmalloc_caches[9]);
99946 }
99947 #else
99948-#ifdef CONFIG_SYSFS
99949+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99950 static void resiliency_test(void) {};
99951 #endif
99952 #endif
99953
99954-#ifdef CONFIG_SYSFS
99955+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99956 enum slab_stat_type {
99957 SL_ALL, /* All slabs */
99958 SL_PARTIAL, /* Only partially allocated slabs */
99959@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99960 {
99961 if (!s->ctor)
99962 return 0;
99963+#ifdef CONFIG_GRKERNSEC_HIDESYM
99964+ return sprintf(buf, "%pS\n", NULL);
99965+#else
99966 return sprintf(buf, "%pS\n", s->ctor);
99967+#endif
99968 }
99969 SLAB_ATTR_RO(ctor);
99970
99971 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99972 {
99973- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99974+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99975 }
99976 SLAB_ATTR_RO(aliases);
99977
99978@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99979 SLAB_ATTR_RO(cache_dma);
99980 #endif
99981
99982+#ifdef CONFIG_PAX_USERCOPY_SLABS
99983+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99984+{
99985+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99986+}
99987+SLAB_ATTR_RO(usercopy);
99988+#endif
99989+
99990+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99991+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99992+{
99993+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99994+}
99995+SLAB_ATTR_RO(sanitize);
99996+#endif
99997+
99998 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99999 {
100000 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100001@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
100002 * as well as cause other issues like converting a mergeable
100003 * cache into an umergeable one.
100004 */
100005- if (s->refcount > 1)
100006+ if (atomic_read(&s->refcount) > 1)
100007 return -EINVAL;
100008
100009 s->flags &= ~SLAB_TRACE;
100010@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
100011 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
100012 size_t length)
100013 {
100014- if (s->refcount > 1)
100015+ if (atomic_read(&s->refcount) > 1)
100016 return -EINVAL;
100017
100018 s->flags &= ~SLAB_FAILSLAB;
100019@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
100020 #ifdef CONFIG_ZONE_DMA
100021 &cache_dma_attr.attr,
100022 #endif
100023+#ifdef CONFIG_PAX_USERCOPY_SLABS
100024+ &usercopy_attr.attr,
100025+#endif
100026+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100027+ &sanitize_attr.attr,
100028+#endif
100029 #ifdef CONFIG_NUMA
100030 &remote_node_defrag_ratio_attr.attr,
100031 #endif
100032@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
100033 return name;
100034 }
100035
100036+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100037 static int sysfs_slab_add(struct kmem_cache *s)
100038 {
100039 int err;
100040@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100041 kobject_del(&s->kobj);
100042 kobject_put(&s->kobj);
100043 }
100044+#endif
100045
100046 /*
100047 * Need to buffer aliases during bootup until sysfs becomes
100048@@ -5161,6 +5258,7 @@ struct saved_alias {
100049
100050 static struct saved_alias *alias_list;
100051
100052+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100053 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100054 {
100055 struct saved_alias *al;
100056@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100057 alias_list = al;
100058 return 0;
100059 }
100060+#endif
100061
100062 static int __init slab_sysfs_init(void)
100063 {
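
The SLUB check_heap_object() added above is the heart of the PAX_USERCOPY heap check: caches not whitelisted with SLAB_USERCOPY are rejected outright (returning the cache name for the violation report), and for whitelisted ones the pointer is reduced modulo the object stride to confirm the copy cannot cross an object boundary. A freestanding sketch of that bounds test, mirroring the hunk's comparison; the geometry struct is hypothetical:

#include <stdbool.h>
#include <stddef.h>

struct cache_geom {
	size_t size;		/* object stride within the slab */
	size_t object_size;	/* usable payload per object */
};

/* true iff [ptr, ptr + n) stays inside one object of a slab at base */
static bool copy_within_object(const struct cache_geom *g,
			       const char *base, const char *ptr, size_t n)
{
	size_t offset = (size_t)(ptr - base) % g->size;

	return offset <= g->object_size && n <= g->object_size - offset;
}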
100064diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100065index 4cba9c2..b4f9fcc 100644
100066--- a/mm/sparse-vmemmap.c
100067+++ b/mm/sparse-vmemmap.c
100068@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100069 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100070 if (!p)
100071 return NULL;
100072- pud_populate(&init_mm, pud, p);
100073+ pud_populate_kernel(&init_mm, pud, p);
100074 }
100075 return pud;
100076 }
100077@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100078 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100079 if (!p)
100080 return NULL;
100081- pgd_populate(&init_mm, pgd, p);
100082+ pgd_populate_kernel(&init_mm, pgd, p);
100083 }
100084 return pgd;
100085 }
100086diff --git a/mm/sparse.c b/mm/sparse.c
100087index d1b48b6..6e8590e 100644
100088--- a/mm/sparse.c
100089+++ b/mm/sparse.c
100090@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100091
100092 for (i = 0; i < PAGES_PER_SECTION; i++) {
100093 if (PageHWPoison(&memmap[i])) {
100094- atomic_long_sub(1, &num_poisoned_pages);
100095+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100096 ClearPageHWPoison(&memmap[i]);
100097 }
100098 }
100099diff --git a/mm/swap.c b/mm/swap.c
100100index 8a12b33..7068e78 100644
100101--- a/mm/swap.c
100102+++ b/mm/swap.c
100103@@ -31,6 +31,7 @@
100104 #include <linux/memcontrol.h>
100105 #include <linux/gfp.h>
100106 #include <linux/uio.h>
100107+#include <linux/hugetlb.h>
100108
100109 #include "internal.h"
100110
100111@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100112
100113 __page_cache_release(page);
100114 dtor = get_compound_page_dtor(page);
100115+ if (!PageHuge(page))
100116+ BUG_ON(dtor != free_compound_page);
100117 (*dtor)(page);
100118 }
100119
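
The swap.c hunk hardens __put_compound_page() by checking, just before the indirect call, that a non-hugetlb compound page still carries the stock free_compound_page destructor, so a corrupted or attacker-controlled function pointer trips a BUG instead of being executed. A tiny sketch of that call-site check; the types are illustrative:

#include <assert.h>

typedef void (*dtor_t)(void *);

static void free_compound_page(void *page) { (void)page; /* ... */ }

static void put_page_checked(void *page, dtor_t dtor, int is_huge)
{
	if (!is_huge)
		assert(dtor == free_compound_page);	/* BUG_ON() in the patch */
	dtor(page);
}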
100120diff --git a/mm/swapfile.c b/mm/swapfile.c
100121index 63f55cc..31874e6 100644
100122--- a/mm/swapfile.c
100123+++ b/mm/swapfile.c
100124@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100125
100126 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100127 /* Activity counter to indicate that a swapon or swapoff has occurred */
100128-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100129+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100130
100131 static inline unsigned char swap_count(unsigned char ent)
100132 {
100133@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100134 spin_unlock(&swap_lock);
100135
100136 err = 0;
100137- atomic_inc(&proc_poll_event);
100138+ atomic_inc_unchecked(&proc_poll_event);
100139 wake_up_interruptible(&proc_poll_wait);
100140
100141 out_dput:
100142@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100143
100144 poll_wait(file, &proc_poll_wait, wait);
100145
100146- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100147- seq->poll_event = atomic_read(&proc_poll_event);
100148+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100149+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100150 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100151 }
100152
100153@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100154 return ret;
100155
100156 seq = file->private_data;
100157- seq->poll_event = atomic_read(&proc_poll_event);
100158+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100159 return 0;
100160 }
100161
100162@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100163 (frontswap_map) ? "FS" : "");
100164
100165 mutex_unlock(&swapon_mutex);
100166- atomic_inc(&proc_poll_event);
100167+ atomic_inc_unchecked(&proc_poll_event);
100168 wake_up_interruptible(&proc_poll_wait);
100169
100170 if (S_ISREG(inode->i_mode))
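
The swapfile.c conversions show the flip side of PaX's REFCOUNT hardening: once plain atomic_t overflows trigger detection, counters that may legitimately wrap, like this poll event counter where only inequality ever matters, are switched to atomic_unchecked_t and the _unchecked accessors to opt out. A user-space analogue of the event-counter pattern, with C11 atomics standing in for the PaX type:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint poll_event;	/* free to wrap; only "changed?" is asked */

static void note_swap_change(void)
{
	atomic_fetch_add(&poll_event, 1);
}

static bool poll_sees_change(unsigned int *cached)
{
	unsigned int now = atomic_load(&poll_event);

	if (*cached == now)
		return false;
	*cached = now;	/* remember what we reported, as seq->poll_event does */
	return true;
}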
100171diff --git a/mm/util.c b/mm/util.c
100172index fec39d4..3e60325 100644
100173--- a/mm/util.c
100174+++ b/mm/util.c
100175@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100176 void arch_pick_mmap_layout(struct mm_struct *mm)
100177 {
100178 mm->mmap_base = TASK_UNMAPPED_BASE;
100179+
100180+#ifdef CONFIG_PAX_RANDMMAP
100181+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100182+ mm->mmap_base += mm->delta_mmap;
100183+#endif
100184+
100185 mm->get_unmapped_area = arch_get_unmapped_area;
100186 }
100187 #endif
100188@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100189 if (!mm->arg_end)
100190 goto out_mm; /* Shh! No looking before we're done */
100191
100192+ if (gr_acl_handle_procpidmem(task))
100193+ goto out_mm;
100194+
100195 len = mm->arg_end - mm->arg_start;
100196
100197 if (len > buflen)
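
The arch_pick_mmap_layout() hunk adds the PAX_RANDMMAP shift: when the flag is set on the mm, the mmap base moves up by a per-process random delta chosen at exec time. A rough sketch of base randomization; the base value and entropy width here are assumptions, the real amounts are per-architecture:

#include <stdlib.h>

#define UNMAPPED_BASE 0x40000000UL	/* illustrative */
#define RAND_BITS 16			/* assumed entropy width */

static unsigned long pick_mmap_base(int randomize)
{
	unsigned long base = UNMAPPED_BASE;

	if (randomize)	/* page-aligned random delta, as mm->delta_mmap is */
		base += ((unsigned long)rand() & ((1UL << RAND_BITS) - 1)) << 12;
	return base;
}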
100198diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100199index 39c3388..7d976d4 100644
100200--- a/mm/vmalloc.c
100201+++ b/mm/vmalloc.c
100202@@ -39,20 +39,65 @@ struct vfree_deferred {
100203 struct work_struct wq;
100204 };
100205 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100206+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100207+
100208+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100209+struct stack_deferred_llist {
100210+ struct llist_head list;
100211+ void *stack;
100212+ void *lowmem_stack;
100213+};
100214+
100215+struct stack_deferred {
100216+ struct stack_deferred_llist list;
100217+ struct work_struct wq;
100218+};
100219+
100220+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100221+#endif
100222
100223 static void __vunmap(const void *, int);
100224
100225-static void free_work(struct work_struct *w)
100226+static void vfree_work(struct work_struct *w)
100227+{
100228+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100229+ struct llist_node *llnode = llist_del_all(&p->list);
100230+ while (llnode) {
100231+ void *x = llnode;
100232+ llnode = llist_next(llnode);
100233+ __vunmap(x, 1);
100234+ }
100235+}
100236+
100237+static void vunmap_work(struct work_struct *w)
100238 {
100239 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100240 struct llist_node *llnode = llist_del_all(&p->list);
100241 while (llnode) {
100242 void *p = llnode;
100243 llnode = llist_next(llnode);
100244- __vunmap(p, 1);
100245+ __vunmap(p, 0);
100246 }
100247 }
100248
100249+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100250+static void unmap_work(struct work_struct *w)
100251+{
100252+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100253+ struct llist_node *llnode = llist_del_all(&p->list.list);
100254+ while (llnode) {
100255+ struct stack_deferred_llist *x =
100256+ llist_entry((struct llist_head *)llnode,
100257+ struct stack_deferred_llist, list);
100258+ void *stack = ACCESS_ONCE(x->stack);
100259+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100260+ llnode = llist_next(llnode);
100261+ __vunmap(stack, 0);
100262+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100263+ }
100264+}
100265+#endif
100266+
100267 /*** Page table manipulation functions ***/
100268
100269 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100270@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100271
100272 pte = pte_offset_kernel(pmd, addr);
100273 do {
100274- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100275- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100276+
100277+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100278+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100279+ BUG_ON(!pte_exec(*pte));
100280+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100281+ continue;
100282+ }
100283+#endif
100284+
100285+ {
100286+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100287+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100288+ }
100289 } while (pte++, addr += PAGE_SIZE, addr != end);
100290 }
100291
100292@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100293 pte = pte_alloc_kernel(pmd, addr);
100294 if (!pte)
100295 return -ENOMEM;
100296+
100297+ pax_open_kernel();
100298 do {
100299 struct page *page = pages[*nr];
100300
100301- if (WARN_ON(!pte_none(*pte)))
100302+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100303+ if (pgprot_val(prot) & _PAGE_NX)
100304+#endif
100305+
100306+ if (!pte_none(*pte)) {
100307+ pax_close_kernel();
100308+ WARN_ON(1);
100309 return -EBUSY;
100310- if (WARN_ON(!page))
100311+ }
100312+ if (!page) {
100313+ pax_close_kernel();
100314+ WARN_ON(1);
100315 return -ENOMEM;
100316+ }
100317 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100318 (*nr)++;
100319 } while (pte++, addr += PAGE_SIZE, addr != end);
100320+ pax_close_kernel();
100321 return 0;
100322 }
100323
100324@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100325 pmd_t *pmd;
100326 unsigned long next;
100327
100328- pmd = pmd_alloc(&init_mm, pud, addr);
100329+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100330 if (!pmd)
100331 return -ENOMEM;
100332 do {
100333@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100334 pud_t *pud;
100335 unsigned long next;
100336
100337- pud = pud_alloc(&init_mm, pgd, addr);
100338+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100339 if (!pud)
100340 return -ENOMEM;
100341 do {
100342@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100343 if (addr >= MODULES_VADDR && addr < MODULES_END)
100344 return 1;
100345 #endif
100346+
100347+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100348+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100349+ return 1;
100350+#endif
100351+
100352 return is_vmalloc_addr(x);
100353 }
100354
100355@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100356
100357 if (!pgd_none(*pgd)) {
100358 pud_t *pud = pud_offset(pgd, addr);
100359+#ifdef CONFIG_X86
100360+ if (!pud_large(*pud))
100361+#endif
100362 if (!pud_none(*pud)) {
100363 pmd_t *pmd = pmd_offset(pud, addr);
100364+#ifdef CONFIG_X86
100365+ if (!pmd_large(*pmd))
100366+#endif
100367 if (!pmd_none(*pmd)) {
100368 pte_t *ptep, pte;
100369
100370@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100371 * Allocate a region of KVA of the specified size and alignment, within the
100372 * vstart and vend.
100373 */
100374-static struct vmap_area *alloc_vmap_area(unsigned long size,
100375+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100376 unsigned long align,
100377 unsigned long vstart, unsigned long vend,
100378 int node, gfp_t gfp_mask)
100379@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100380 for_each_possible_cpu(i) {
100381 struct vmap_block_queue *vbq;
100382 struct vfree_deferred *p;
100383+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100384+ struct stack_deferred *p2;
100385+#endif
100386
100387 vbq = &per_cpu(vmap_block_queue, i);
100388 spin_lock_init(&vbq->lock);
100389 INIT_LIST_HEAD(&vbq->free);
100390+
100391 p = &per_cpu(vfree_deferred, i);
100392 init_llist_head(&p->list);
100393- INIT_WORK(&p->wq, free_work);
100394+ INIT_WORK(&p->wq, vfree_work);
100395+
100396+ p = &per_cpu(vunmap_deferred, i);
100397+ init_llist_head(&p->list);
100398+ INIT_WORK(&p->wq, vunmap_work);
100399+
100400+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100401+ p2 = &per_cpu(stack_deferred, i);
100402+ init_llist_head(&p2->list.list);
100403+ INIT_WORK(&p2->wq, unmap_work);
100404+#endif
100405 }
100406
100407 /* Import existing vmlist entries. */
100408@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100409 struct vm_struct *area;
100410
100411 BUG_ON(in_interrupt());
100412+
100413+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100414+ if (flags & VM_KERNEXEC) {
100415+ if (start != VMALLOC_START || end != VMALLOC_END)
100416+ return NULL;
100417+ start = (unsigned long)MODULES_EXEC_VADDR;
100418+ end = (unsigned long)MODULES_EXEC_END;
100419+ }
100420+#endif
100421+
100422 if (flags & VM_IOREMAP)
100423 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100424
100425@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100426 */
100427 void vunmap(const void *addr)
100428 {
100429- BUG_ON(in_interrupt());
100430- might_sleep();
100431- if (addr)
100432+ if (!addr)
100433+ return;
100434+
100435+ if (unlikely(in_interrupt())) {
100436+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100437+ if (llist_add((struct llist_node *)addr, &p->list))
100438+ schedule_work(&p->wq);
100439+ } else {
100440+ might_sleep();
100441 __vunmap(addr, 0);
100442+ }
100443 }
100444 EXPORT_SYMBOL(vunmap);
100445
100446+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100447+void unmap_process_stacks(struct task_struct *task)
100448+{
100449+ if (unlikely(in_interrupt())) {
100450+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100451+ struct stack_deferred_llist *list = task->stack;
100452+ list->stack = task->stack;
100453+ list->lowmem_stack = task->lowmem_stack;
100454+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100455+ schedule_work(&p->wq);
100456+ } else {
100457+ __vunmap(task->stack, 0);
100458+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100459+ }
100460+}
100461+#endif
100462+
100463 /**
100464 * vmap - map an array of pages into virtually contiguous space
100465 * @pages: array of page pointers
100466@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100467 if (count > totalram_pages)
100468 return NULL;
100469
100470+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100471+ if (!(pgprot_val(prot) & _PAGE_NX))
100472+ flags |= VM_KERNEXEC;
100473+#endif
100474+
100475 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100476 __builtin_return_address(0));
100477 if (!area)
100478@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100479 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100480 goto fail;
100481
100482+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100483+ if (!(pgprot_val(prot) & _PAGE_NX))
100484+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100485+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100486+ else
100487+#endif
100488+
100489 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100490 start, end, node, gfp_mask, caller);
100491 if (!area)
100492@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100493 * For tight control over page level allocator and protection flags
100494 * use __vmalloc() instead.
100495 */
100496-
100497 void *vmalloc_exec(unsigned long size)
100498 {
100499- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100500+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100501 NUMA_NO_NODE, __builtin_return_address(0));
100502 }
100503
100504@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100505 {
100506 struct vm_struct *area;
100507
100508+ BUG_ON(vma->vm_mirror);
100509+
100510 size = PAGE_ALIGN(size);
100511
100512 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100513@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100514 v->addr, v->addr + v->size, v->size);
100515
100516 if (v->caller)
100517+#ifdef CONFIG_GRKERNSEC_HIDESYM
100518+ seq_printf(m, " %pK", v->caller);
100519+#else
100520 seq_printf(m, " %pS", v->caller);
100521+#endif
100522
100523 if (v->nr_pages)
100524 seq_printf(m, " pages=%d", v->nr_pages);
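
A pattern worth pulling out of the vmalloc.c hunks: vunmap() may now be called from interrupt context, where the teardown (which can sleep) is not allowed, so the address is pushed onto a per-CPU lock-free llist and a workqueue item performs the real __vunmap() later; process stacks get the same treatment under GRKERNSEC_KSTACKOVERFLOW. A compact user-space analogue of that defer-to-worker pattern; the kernel's llist/schedule_work machinery is only imitated here:

#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static _Atomic(struct node *) deferred;	/* the per-CPU llist in the patch */

/* lock-free push, safe from a context that must not sleep */
static void defer_release(struct node *n)
{
	n->next = atomic_load(&deferred);
	while (!atomic_compare_exchange_weak(&deferred, &n->next, n))
		;	/* on failure, n->next was refreshed; retry */
}

/* runs later in a context that may sleep (the work item in the patch) */
static void drain(void (*release)(struct node *))
{
	struct node *n = atomic_exchange(&deferred, NULL);

	while (n) {
		struct node *next = n->next;
		release(n);
		n = next;
	}
}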
100525diff --git a/mm/vmstat.c b/mm/vmstat.c
100526index cdac773..7dd324e 100644
100527--- a/mm/vmstat.c
100528+++ b/mm/vmstat.c
100529@@ -24,6 +24,7 @@
100530 #include <linux/mm_inline.h>
100531 #include <linux/page_ext.h>
100532 #include <linux/page_owner.h>
100533+#include <linux/grsecurity.h>
100534
100535 #include "internal.h"
100536
100537@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100538 *
100539 * vm_stat contains the global counters
100540 */
100541-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100542+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100543 EXPORT_SYMBOL(vm_stat);
100544
100545 #ifdef CONFIG_SMP
100546@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100547
100548 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100549 if (diff[i]) {
100550- atomic_long_add(diff[i], &vm_stat[i]);
100551+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100552 changes++;
100553 }
100554 return changes;
100555@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100556 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100557 if (v) {
100558
100559- atomic_long_add(v, &zone->vm_stat[i]);
100560+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100561 global_diff[i] += v;
100562 #ifdef CONFIG_NUMA
100563 /* 3 seconds idle till flush */
100564@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100565
100566 v = p->vm_stat_diff[i];
100567 p->vm_stat_diff[i] = 0;
100568- atomic_long_add(v, &zone->vm_stat[i]);
100569+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100570 global_diff[i] += v;
100571 }
100572 }
100573@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100574 if (pset->vm_stat_diff[i]) {
100575 int v = pset->vm_stat_diff[i];
100576 pset->vm_stat_diff[i] = 0;
100577- atomic_long_add(v, &zone->vm_stat[i]);
100578- atomic_long_add(v, &vm_stat[i]);
100579+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100580+ atomic_long_add_unchecked(v, &vm_stat[i]);
100581 }
100582 }
100583 #endif
100584@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100585 stat_items_size += sizeof(struct vm_event_state);
100586 #endif
100587
100588- v = kmalloc(stat_items_size, GFP_KERNEL);
100589+ v = kzalloc(stat_items_size, GFP_KERNEL);
100590 m->private = v;
100591 if (!v)
100592 return ERR_PTR(-ENOMEM);
100593+
100594+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100595+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100596+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100597+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100598+ && !in_group_p(grsec_proc_gid)
100599+#endif
100600+ )
100601+ return (unsigned long *)m->private + *pos;
100602+#endif
100603+#endif
100604+
100605 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100606 v[i] = global_page_state(i);
100607 v += NR_VM_ZONE_STAT_ITEMS;
100608@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100609 cpu_notifier_register_done();
100610 #endif
100611 #ifdef CONFIG_PROC_FS
100612- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100613- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100614- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100615- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100616+ {
100617+ mode_t gr_mode = S_IRUGO;
100618+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100619+ gr_mode = S_IRUSR;
100620+#endif
100621+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100622+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100623+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100624+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100625+ }
100626 #endif
100627 return 0;
100628 }
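
Two related vmstat.c changes: under GRKERNSEC_PROC_ADD the buddyinfo, pagetypeinfo and zoneinfo files are created 0400 instead of 0444 (vmstat itself stays world-readable), and vmstat_start() switches to kzalloc so an unprivileged reader who is gated out receives zero-filled counters rather than stale heap contents. A one-function sketch of the gate; the uid/group plumbing is simplified:

#include <stdbool.h>
#include <sys/types.h>

static bool may_read_vm_counters(uid_t uid, bool in_proc_group)
{
	return uid == 0 || in_proc_group;	/* root, or the configured proc gid */
}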
100629diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100630index 64c6bed..b79a5de 100644
100631--- a/net/8021q/vlan.c
100632+++ b/net/8021q/vlan.c
100633@@ -481,7 +481,7 @@ out:
100634 return NOTIFY_DONE;
100635 }
100636
100637-static struct notifier_block vlan_notifier_block __read_mostly = {
100638+static struct notifier_block vlan_notifier_block = {
100639 .notifier_call = vlan_device_event,
100640 };
100641
100642@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100643 err = -EPERM;
100644 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100645 break;
100646- if ((args.u.name_type >= 0) &&
100647- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100648+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100649 struct vlan_net *vn;
100650
100651 vn = net_generic(net, vlan_net_id);
100652diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100653index 8ac8a5c..991defc 100644
100654--- a/net/8021q/vlan_netlink.c
100655+++ b/net/8021q/vlan_netlink.c
100656@@ -238,7 +238,7 @@ nla_put_failure:
100657 return -EMSGSIZE;
100658 }
100659
100660-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100661+struct rtnl_link_ops vlan_link_ops = {
100662 .kind = "vlan",
100663 .maxtype = IFLA_VLAN_MAX,
100664 .policy = vlan_policy,
100665diff --git a/net/9p/client.c b/net/9p/client.c
100666index e86a9bea..e91f70e 100644
100667--- a/net/9p/client.c
100668+++ b/net/9p/client.c
100669@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100670 len - inline_len);
100671 } else {
100672 err = copy_from_user(ename + inline_len,
100673- uidata, len - inline_len);
100674+ (char __force_user *)uidata, len - inline_len);
100675 if (err) {
100676 err = -EFAULT;
100677 goto out_err;
100678@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100679 kernel_buf = 1;
100680 indata = data;
100681 } else
100682- indata = (__force char *)udata;
100683+ indata = (__force_kernel char *)udata;
100684 /*
100685 * response header len is 11
100686 * PDU Header(7) + IO Size (4)
100687@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100688 kernel_buf = 1;
100689 odata = data;
100690 } else
100691- odata = (char *)udata;
100692+ odata = (char __force_kernel *)udata;
100693 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100694 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100695 fid->fid, offset, rsize);
100696diff --git a/net/9p/mod.c b/net/9p/mod.c
100697index 6ab36ae..6f1841b 100644
100698--- a/net/9p/mod.c
100699+++ b/net/9p/mod.c
100700@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100701 void v9fs_register_trans(struct p9_trans_module *m)
100702 {
100703 spin_lock(&v9fs_trans_lock);
100704- list_add_tail(&m->list, &v9fs_trans_list);
100705+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100706 spin_unlock(&v9fs_trans_lock);
100707 }
100708 EXPORT_SYMBOL(v9fs_register_trans);
100709@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100710 void v9fs_unregister_trans(struct p9_trans_module *m)
100711 {
100712 spin_lock(&v9fs_trans_lock);
100713- list_del_init(&m->list);
100714+ pax_list_del_init((struct list_head *)&m->list);
100715 spin_unlock(&v9fs_trans_lock);
100716 }
100717 EXPORT_SYMBOL(v9fs_unregister_trans);
100718diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100719index 80d08f6..de63fd1 100644
100720--- a/net/9p/trans_fd.c
100721+++ b/net/9p/trans_fd.c
100722@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100723 oldfs = get_fs();
100724 set_fs(get_ds());
100725 /* The cast to a user pointer is valid due to the set_fs() */
100726- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100727+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100728 set_fs(oldfs);
100729
100730 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
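
The 9p hunks replace bare __force casts with __force_user / __force_kernel, PaX-specific sparse annotations that also record which address space the cast lands in (they combine __force with the destination space). The underlying mechanism is sparse's address-space checking; a sketch of the stock kernel flavour of these macros and the cast they legitimise once set_fs(get_ds()) has widened the user segment (definitions as in 3.x-era kernels; treat them as an assumption here):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

static long write_from_kernel(long (*vwrite)(const void __user *, unsigned long),
			      const void *kbuf, unsigned long len)
{
	/* the cast tells sparse the address-space change is intentional */
	return vwrite((const void __force __user *)kbuf, len);
}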
100731diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100732index af46bc4..f9adfcd 100644
100733--- a/net/appletalk/atalk_proc.c
100734+++ b/net/appletalk/atalk_proc.c
100735@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100736 struct proc_dir_entry *p;
100737 int rc = -ENOMEM;
100738
100739- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100740+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100741 if (!atalk_proc_dir)
100742 goto out;
100743
100744diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100745index 876fbe8..8bbea9f 100644
100746--- a/net/atm/atm_misc.c
100747+++ b/net/atm/atm_misc.c
100748@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100749 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100750 return 1;
100751 atm_return(vcc, truesize);
100752- atomic_inc(&vcc->stats->rx_drop);
100753+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100754 return 0;
100755 }
100756 EXPORT_SYMBOL(atm_charge);
100757@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100758 }
100759 }
100760 atm_return(vcc, guess);
100761- atomic_inc(&vcc->stats->rx_drop);
100762+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100763 return NULL;
100764 }
100765 EXPORT_SYMBOL(atm_alloc_charge);
100766@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100767
100768 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100769 {
100770-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100771+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100772 __SONET_ITEMS
100773 #undef __HANDLE_ITEM
100774 }
100775@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100776
100777 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100778 {
100779-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100780+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100781 __SONET_ITEMS
100782 #undef __HANDLE_ITEM
100783 }
100784diff --git a/net/atm/lec.c b/net/atm/lec.c
100785index 4b98f89..5a2f6cb 100644
100786--- a/net/atm/lec.c
100787+++ b/net/atm/lec.c
100788@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100789 }
100790
100791 static struct lane2_ops lane2_ops = {
100792- lane2_resolve, /* resolve, spec 3.1.3 */
100793- lane2_associate_req, /* associate_req, spec 3.1.4 */
100794- NULL /* associate indicator, spec 3.1.5 */
100795+ .resolve = lane2_resolve,
100796+ .associate_req = lane2_associate_req,
100797+ .associate_indicator = NULL
100798 };
100799
100800 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100801diff --git a/net/atm/lec.h b/net/atm/lec.h
100802index 4149db1..f2ab682 100644
100803--- a/net/atm/lec.h
100804+++ b/net/atm/lec.h
100805@@ -48,7 +48,7 @@ struct lane2_ops {
100806 const u8 *tlvs, u32 sizeoftlvs);
100807 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100808 const u8 *tlvs, u32 sizeoftlvs);
100809-};
100810+} __no_const;
100811
100812 /*
100813 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100814diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100815index d1b2d9a..d549f7f 100644
100816--- a/net/atm/mpoa_caches.c
100817+++ b/net/atm/mpoa_caches.c
100818@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100819
100820
100821 static struct in_cache_ops ingress_ops = {
100822- in_cache_add_entry, /* add_entry */
100823- in_cache_get, /* get */
100824- in_cache_get_with_mask, /* get_with_mask */
100825- in_cache_get_by_vcc, /* get_by_vcc */
100826- in_cache_put, /* put */
100827- in_cache_remove_entry, /* remove_entry */
100828- cache_hit, /* cache_hit */
100829- clear_count_and_expired, /* clear_count */
100830- check_resolving_entries, /* check_resolving */
100831- refresh_entries, /* refresh */
100832- in_destroy_cache /* destroy_cache */
100833+ .add_entry = in_cache_add_entry,
100834+ .get = in_cache_get,
100835+ .get_with_mask = in_cache_get_with_mask,
100836+ .get_by_vcc = in_cache_get_by_vcc,
100837+ .put = in_cache_put,
100838+ .remove_entry = in_cache_remove_entry,
100839+ .cache_hit = cache_hit,
100840+ .clear_count = clear_count_and_expired,
100841+ .check_resolving = check_resolving_entries,
100842+ .refresh = refresh_entries,
100843+ .destroy_cache = in_destroy_cache
100844 };
100845
100846 static struct eg_cache_ops egress_ops = {
100847- eg_cache_add_entry, /* add_entry */
100848- eg_cache_get_by_cache_id, /* get_by_cache_id */
100849- eg_cache_get_by_tag, /* get_by_tag */
100850- eg_cache_get_by_vcc, /* get_by_vcc */
100851- eg_cache_get_by_src_ip, /* get_by_src_ip */
100852- eg_cache_put, /* put */
100853- eg_cache_remove_entry, /* remove_entry */
100854- update_eg_cache_entry, /* update */
100855- clear_expired, /* clear_expired */
100856- eg_destroy_cache /* destroy_cache */
100857+ .add_entry = eg_cache_add_entry,
100858+ .get_by_cache_id = eg_cache_get_by_cache_id,
100859+ .get_by_tag = eg_cache_get_by_tag,
100860+ .get_by_vcc = eg_cache_get_by_vcc,
100861+ .get_by_src_ip = eg_cache_get_by_src_ip,
100862+ .put = eg_cache_put,
100863+ .remove_entry = eg_cache_remove_entry,
100864+ .update = update_eg_cache_entry,
100865+ .clear_expired = clear_expired,
100866+ .destroy_cache = eg_destroy_cache
100867 };
100868
100869
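
The lec.c and mpoa_caches.c hunks convert positional ops-table initializers to C99 designated initializers: each function pointer is tied to its field by name, reordering the struct can no longer silently misbind callbacks, and omitted members (like .associate_indicator above) default to NULL without a placeholder. For example:

struct ops {
	int  (*open)(void);
	void (*close)(void);
	void (*reset)(void);
};

static int  my_open(void)  { return 0; }
static void my_close(void) { }

static const struct ops my_ops = {
	.open  = my_open,
	.close = my_close,
	/* .reset omitted: zero-initialised to NULL */
};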
100870diff --git a/net/atm/proc.c b/net/atm/proc.c
100871index bbb6461..cf04016 100644
100872--- a/net/atm/proc.c
100873+++ b/net/atm/proc.c
100874@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100875 const struct k_atm_aal_stats *stats)
100876 {
100877 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100878- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100879- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100880- atomic_read(&stats->rx_drop));
100881+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100882+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100883+ atomic_read_unchecked(&stats->rx_drop));
100884 }
100885
100886 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100887diff --git a/net/atm/resources.c b/net/atm/resources.c
100888index 0447d5d..3cf4728 100644
100889--- a/net/atm/resources.c
100890+++ b/net/atm/resources.c
100891@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100892 static void copy_aal_stats(struct k_atm_aal_stats *from,
100893 struct atm_aal_stats *to)
100894 {
100895-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100896+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100897 __AAL_STAT_ITEMS
100898 #undef __HANDLE_ITEM
100899 }
100900@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100901 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100902 struct atm_aal_stats *to)
100903 {
100904-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100905+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100906 __AAL_STAT_ITEMS
100907 #undef __HANDLE_ITEM
100908 }
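
copy_aal_stats() and subtract_aal_stats() use an X-macro: __AAL_STAT_ITEMS names each stat field exactly once, and each helper supplies its own __HANDLE_ITEM before expanding the list, which is why switching the accessor to atomic_read_unchecked()/atomic_sub_unchecked() is a one-line change per helper. A self-contained sketch of the pattern with invented field names:

    #include <stdio.h>

    /* The field list is written once; each user redefines __HANDLE_ITEM. */
    #define STAT_ITEMS \
        __HANDLE_ITEM(tx); \
        __HANDLE_ITEM(rx); \
        __HANDLE_ITEM(rx_err);

    struct stats { long tx, rx, rx_err; };

    static void copy_stats(const struct stats *from, struct stats *to)
    {
    #define __HANDLE_ITEM(i) to->i = from->i
        STAT_ITEMS
    #undef __HANDLE_ITEM
    }

    static void subtract_stats(const struct stats *from, struct stats *to)
    {
    #define __HANDLE_ITEM(i) to->i -= from->i
        STAT_ITEMS
    #undef __HANDLE_ITEM
    }

    int main(void)
    {
        struct stats a = { 10, 20, 3 }, b = { 0 };
        copy_stats(&a, &b);      /* b now equals a           */
        subtract_stats(&a, &b);  /* b back to all zeros      */
        printf("%ld %ld %ld\n", b.tx, b.rx, b.rx_err);
        return 0;
    }
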
100909diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100910index 919a5ce..cc6b444 100644
100911--- a/net/ax25/sysctl_net_ax25.c
100912+++ b/net/ax25/sysctl_net_ax25.c
100913@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100914 {
100915 char path[sizeof("net/ax25/") + IFNAMSIZ];
100916 int k;
100917- struct ctl_table *table;
100918+ ctl_table_no_const *table;
100919
100920 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100921 if (!table)
100922diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100923index 1e80539..676c37a 100644
100924--- a/net/batman-adv/bat_iv_ogm.c
100925+++ b/net/batman-adv/bat_iv_ogm.c
100926@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100927
100928 /* randomize initial seqno to avoid collision */
100929 get_random_bytes(&random_seqno, sizeof(random_seqno));
100930- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100931+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100932
100933 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100934 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100935@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100936 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100937
100938 /* change sequence number to network order */
100939- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100940+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100941 batadv_ogm_packet->seqno = htonl(seqno);
100942- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100943+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100944
100945 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100946
100947@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100948 return;
100949
100950 /* could be changed by schedule_own_packet() */
100951- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100952+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100953
100954 if (ogm_packet->flags & BATADV_DIRECTLINK)
100955 has_directlink_flag = true;
100956diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100957index 00f9e14..e1c7203 100644
100958--- a/net/batman-adv/fragmentation.c
100959+++ b/net/batman-adv/fragmentation.c
100960@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100961 frag_header.packet_type = BATADV_UNICAST_FRAG;
100962 frag_header.version = BATADV_COMPAT_VERSION;
100963 frag_header.ttl = BATADV_TTL;
100964- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100965+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100966 frag_header.reserved = 0;
100967 frag_header.no = 0;
100968 frag_header.total_size = htons(skb->len);
100969diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100970index 5467955..75ad4e3 100644
100971--- a/net/batman-adv/soft-interface.c
100972+++ b/net/batman-adv/soft-interface.c
100973@@ -296,7 +296,7 @@ send:
100974 primary_if->net_dev->dev_addr);
100975
100976 /* set broadcast sequence number */
100977- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100978+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100979 bcast_packet->seqno = htonl(seqno);
100980
100981 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100982@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100983 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100984
100985 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100986- atomic_set(&bat_priv->bcast_seqno, 1);
100987+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100988 atomic_set(&bat_priv->tt.vn, 0);
100989 atomic_set(&bat_priv->tt.local_changes, 0);
100990 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100991@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100992
100993 /* randomize initial seqno to avoid collision */
100994 get_random_bytes(&random_seqno, sizeof(random_seqno));
100995- atomic_set(&bat_priv->frag_seqno, random_seqno);
100996+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100997
100998 bat_priv->primary_if = NULL;
100999 bat_priv->num_ifaces = 0;
101000@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
101001 return 0;
101002 }
101003
101004-struct rtnl_link_ops batadv_link_ops __read_mostly = {
101005+struct rtnl_link_ops batadv_link_ops = {
101006 .kind = "batadv",
101007 .priv_size = sizeof(struct batadv_priv),
101008 .setup = batadv_softif_init_early,
101009diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101010index 8854c05..ee5d5497 100644
101011--- a/net/batman-adv/types.h
101012+++ b/net/batman-adv/types.h
101013@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101014 struct batadv_hard_iface_bat_iv {
101015 unsigned char *ogm_buff;
101016 int ogm_buff_len;
101017- atomic_t ogm_seqno;
101018+ atomic_unchecked_t ogm_seqno;
101019 };
101020
101021 /**
101022@@ -768,7 +768,7 @@ struct batadv_priv {
101023 atomic_t bonding;
101024 atomic_t fragmentation;
101025 atomic_t packet_size_max;
101026- atomic_t frag_seqno;
101027+ atomic_unchecked_t frag_seqno;
101028 #ifdef CONFIG_BATMAN_ADV_BLA
101029 atomic_t bridge_loop_avoidance;
101030 #endif
101031@@ -787,7 +787,7 @@ struct batadv_priv {
101032 #endif
101033 uint32_t isolation_mark;
101034 uint32_t isolation_mark_mask;
101035- atomic_t bcast_seqno;
101036+ atomic_unchecked_t bcast_seqno;
101037 atomic_t bcast_queue_left;
101038 atomic_t batman_queue_left;
101039 char num_ifaces;
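
All of the batman-adv hunks retype wrapping sequence counters from atomic_t to atomic_unchecked_t. Under PaX REFCOUNT, atomic_t overflow is detected in order to block reference-count wraparound exploits, so counters that are supposed to wrap modulo 2^32 (the OGM, fragment and broadcast seqnos here) must be marked exempt. A rough userspace model of the distinction, assuming the checked variant saturates; the real implementation traps via architecture-specific overflow checks:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { int counter; } atomic_model_t;            /* checked */
    typedef struct { int counter; } atomic_unchecked_model_t;  /* exempt  */

    /* Model of a checked increment: refuse to wrap past INT_MAX. */
    static int atomic_inc_return_model(atomic_model_t *v)
    {
        if (v->counter == INT_MAX) {
            fprintf(stderr, "refcount overflow trapped\n");
            return v->counter;  /* saturate instead of wrapping */
        }
        return ++v->counter;
    }

    /* Unchecked increment: ordinary wrapping arithmetic, exactly what a
     * sequence number needs. */
    static int atomic_inc_return_unchecked_model(atomic_unchecked_model_t *v)
    {
        v->counter = (int)((unsigned int)v->counter + 1u);
        return v->counter;
    }

    int main(void)
    {
        atomic_model_t ref = { INT_MAX };
        atomic_unchecked_model_t seq = { INT_MAX };

        atomic_inc_return_model(&ref);  /* trapped/saturated */
        printf("seqno wraps to %d\n",   /* INT_MIN on two's complement */
               atomic_inc_return_unchecked_model(&seq));
        return 0;
    }
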
101040diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101041index 2c245fd..dccf543 100644
101042--- a/net/bluetooth/hci_sock.c
101043+++ b/net/bluetooth/hci_sock.c
101044@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101045 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101046 }
101047
101048- len = min_t(unsigned int, len, sizeof(uf));
101049+ len = min((size_t)len, sizeof(uf));
101050 if (copy_from_user(&uf, optval, len)) {
101051 err = -EFAULT;
101052 break;
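
The min_t(unsigned int, ...) to min((size_t)len, ...) conversions in this and the following Bluetooth hunks all target the same C pitfall: the usual arithmetic conversions silently turn a negative signed length into a huge unsigned one, so a bounds check can pass vacuously. That is also why the l2cap/rfcomm hunks below keep len as a size_t seeded from optlen, leaving no signed value to misconvert. A short demonstration of the conversion trap:

    #include <stdio.h>

    int main(void)
    {
        int len = -1;  /* e.g. a bogus, attacker-chosen option length */

        /* Usual arithmetic conversions: len is converted to the unsigned
         * type of sizeof, -1 becomes SIZE_MAX, and the comparison goes
         * the wrong way. */
        if (len < sizeof(long))
            puts("looks small");
        else
            puts("-1 was compared as a huge unsigned value");

        /* An unsigned length end-to-end clamps predictably. */
        size_t ulen = sizeof(long);
        if (ulen <= sizeof(long))
            puts("unsigned length clamps predictably");
        return 0;
    }
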
101053diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101054index d04dc00..d25d576 100644
101055--- a/net/bluetooth/l2cap_core.c
101056+++ b/net/bluetooth/l2cap_core.c
101057@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101058 break;
101059
101060 case L2CAP_CONF_RFC:
101061- if (olen == sizeof(rfc))
101062- memcpy(&rfc, (void *)val, olen);
101063+ if (olen != sizeof(rfc))
101064+ break;
101065+
101066+ memcpy(&rfc, (void *)val, olen);
101067
101068 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101069 rfc.mode != chan->mode)
101070diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101071index f65caf4..c07110c 100644
101072--- a/net/bluetooth/l2cap_sock.c
101073+++ b/net/bluetooth/l2cap_sock.c
101074@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101075 struct sock *sk = sock->sk;
101076 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101077 struct l2cap_options opts;
101078- int len, err = 0;
101079+ int err = 0;
101080+ size_t len = optlen;
101081 u32 opt;
101082
101083 BT_DBG("sk %p", sk);
101084@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101085 opts.max_tx = chan->max_tx;
101086 opts.txwin_size = chan->tx_win;
101087
101088- len = min_t(unsigned int, sizeof(opts), optlen);
101089+ len = min(sizeof(opts), len);
101090 if (copy_from_user((char *) &opts, optval, len)) {
101091 err = -EFAULT;
101092 break;
101093@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101094 struct bt_security sec;
101095 struct bt_power pwr;
101096 struct l2cap_conn *conn;
101097- int len, err = 0;
101098+ int err = 0;
101099+ size_t len = optlen;
101100 u32 opt;
101101
101102 BT_DBG("sk %p", sk);
101103@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101104
101105 sec.level = BT_SECURITY_LOW;
101106
101107- len = min_t(unsigned int, sizeof(sec), optlen);
101108+ len = min(sizeof(sec), len);
101109 if (copy_from_user((char *) &sec, optval, len)) {
101110 err = -EFAULT;
101111 break;
101112@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101113
101114 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101115
101116- len = min_t(unsigned int, sizeof(pwr), optlen);
101117+ len = min(sizeof(pwr), len);
101118 if (copy_from_user((char *) &pwr, optval, len)) {
101119 err = -EFAULT;
101120 break;
101121diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101122index 2348176..b9b6cf2 100644
101123--- a/net/bluetooth/rfcomm/sock.c
101124+++ b/net/bluetooth/rfcomm/sock.c
101125@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101126 struct sock *sk = sock->sk;
101127 struct bt_security sec;
101128 int err = 0;
101129- size_t len;
101130+ size_t len = optlen;
101131 u32 opt;
101132
101133 BT_DBG("sk %p", sk);
101134@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101135
101136 sec.level = BT_SECURITY_LOW;
101137
101138- len = min_t(unsigned int, sizeof(sec), optlen);
101139+ len = min(sizeof(sec), len);
101140 if (copy_from_user((char *) &sec, optval, len)) {
101141 err = -EFAULT;
101142 break;
101143diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101144index 8e385a0..a5bdd8e 100644
101145--- a/net/bluetooth/rfcomm/tty.c
101146+++ b/net/bluetooth/rfcomm/tty.c
101147@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101148 BT_DBG("tty %p id %d", tty, tty->index);
101149
101150 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101151- dev->channel, dev->port.count);
101152+ dev->channel, atomic_read(&dev->port.count));
101153
101154 err = tty_port_open(&dev->port, tty, filp);
101155 if (err)
101156@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101157 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101158
101159 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101160- dev->port.count);
101161+ atomic_read(&dev->port.count));
101162
101163 tty_port_close(&dev->port, tty, filp);
101164 }
101165diff --git a/net/bridge/br.c b/net/bridge/br.c
101166index 44425af..4ee730e 100644
101167--- a/net/bridge/br.c
101168+++ b/net/bridge/br.c
101169@@ -147,6 +147,8 @@ static int __init br_init(void)
101170 {
101171 int err;
101172
101173+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
101174+
101175 err = stp_proto_register(&br_stp_proto);
101176 if (err < 0) {
101177 pr_err("bridge: can't register sap for STP\n");
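
The added BUILD_BUG_ON() proves at compile time that the bridge's per-packet state still fits in the shared skb->cb[] scratch area; an overflow there would silently corrupt a neighbouring layer's state at runtime. The same guard expressed in portable C11 via _Static_assert, with invented stand-in types:

    #include <stddef.h>

    /* Stand-in for skb->cb[]: a fixed scratch area shared by all layers. */
    struct sk_buff_model { unsigned char cb[48]; };

    /* Per-layer state that must fit inside the scratch area. */
    struct bridge_cb_model {
        unsigned int brd;
        unsigned int port;
        unsigned char flags;
    };

    #define FIELD_SIZEOF_MODEL(t, f) (sizeof(((t *)0)->f))

    /* Fails the build, not the running system, if the state ever outgrows
     * the buffer. BUILD_BUG_ON() is the kernel's pre-C11 equivalent. */
    _Static_assert(sizeof(struct bridge_cb_model) <=
                   FIELD_SIZEOF_MODEL(struct sk_buff_model, cb),
                   "bridge cb state no longer fits in skb->cb[]");

    int main(void) { return 0; }
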
101178diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101179index 9f5eb55..45ab9c5 100644
101180--- a/net/bridge/br_netlink.c
101181+++ b/net/bridge/br_netlink.c
101182@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
101183 .get_link_af_size = br_get_link_af_size,
101184 };
101185
101186-struct rtnl_link_ops br_link_ops __read_mostly = {
101187+struct rtnl_link_ops br_link_ops = {
101188 .kind = "bridge",
101189 .priv_size = sizeof(struct net_bridge),
101190 .setup = br_dev_setup,
101191diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101192index d9a8c05..8dadc6c6 100644
101193--- a/net/bridge/netfilter/ebtables.c
101194+++ b/net/bridge/netfilter/ebtables.c
101195@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101196 tmp.valid_hooks = t->table->valid_hooks;
101197 }
101198 mutex_unlock(&ebt_mutex);
101199- if (copy_to_user(user, &tmp, *len) != 0) {
101200+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101201 BUGPRINT("c2u Didn't work\n");
101202 ret = -EFAULT;
101203 break;
101204@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101205 goto out;
101206 tmp.valid_hooks = t->valid_hooks;
101207
101208- if (copy_to_user(user, &tmp, *len) != 0) {
101209+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101210 ret = -EFAULT;
101211 break;
101212 }
101213@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101214 tmp.entries_size = t->table->entries_size;
101215 tmp.valid_hooks = t->table->valid_hooks;
101216
101217- if (copy_to_user(user, &tmp, *len) != 0) {
101218+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101219 ret = -EFAULT;
101220 break;
101221 }
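
All three ebtables hunks add the identical guard: reject a user-supplied *len larger than the kernel object before copy_to_user(), since an oversized copy leaks whatever follows the object on the stack. A userspace sketch of the fixed pattern, with memcpy standing in for copy_to_user() and all names invented:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct repl_info { char name[16]; unsigned int entries; };

    /* Mimics the fixed do_ebt_get_ctl() logic: reject before copying a
     * kernel struct to a user buffer. */
    static int get_info(void *user_buf, size_t user_len)
    {
        struct repl_info tmp = { "filter", 3 };

        if (user_len > sizeof(tmp))   /* the added check */
            return -EFAULT;           /* would have leaked stack bytes */
        memcpy(user_buf, &tmp, user_len);
        return 0;
    }

    int main(void)
    {
        char buf[64];
        printf("len == sizeof: %d\n", get_info(buf, sizeof(struct repl_info)));
        printf("oversized len: %d\n", get_info(buf, sizeof(buf)));
        return 0;
    }
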
101222diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101223index f5afda1..dcf770a 100644
101224--- a/net/caif/cfctrl.c
101225+++ b/net/caif/cfctrl.c
101226@@ -10,6 +10,7 @@
101227 #include <linux/spinlock.h>
101228 #include <linux/slab.h>
101229 #include <linux/pkt_sched.h>
101230+#include <linux/sched.h>
101231 #include <net/caif/caif_layer.h>
101232 #include <net/caif/cfpkt.h>
101233 #include <net/caif/cfctrl.h>
101234@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101235 memset(&dev_info, 0, sizeof(dev_info));
101236 dev_info.id = 0xff;
101237 cfsrvl_init(&this->serv, 0, &dev_info, false);
101238- atomic_set(&this->req_seq_no, 1);
101239- atomic_set(&this->rsp_seq_no, 1);
101240+ atomic_set_unchecked(&this->req_seq_no, 1);
101241+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101242 this->serv.layer.receive = cfctrl_recv;
101243 sprintf(this->serv.layer.name, "ctrl");
101244 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101245@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101246 struct cfctrl_request_info *req)
101247 {
101248 spin_lock_bh(&ctrl->info_list_lock);
101249- atomic_inc(&ctrl->req_seq_no);
101250- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101251+ atomic_inc_unchecked(&ctrl->req_seq_no);
101252+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101253 list_add_tail(&req->list, &ctrl->list);
101254 spin_unlock_bh(&ctrl->info_list_lock);
101255 }
101256@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101257 if (p != first)
101258 pr_warn("Requests are not received in order\n");
101259
101260- atomic_set(&ctrl->rsp_seq_no,
101261+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101262 p->sequence_no);
101263 list_del(&p->list);
101264 goto out;
101265diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101266index 67a4a36..8d28068 100644
101267--- a/net/caif/chnl_net.c
101268+++ b/net/caif/chnl_net.c
101269@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101270 };
101271
101272
101273-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101274+static struct rtnl_link_ops ipcaif_link_ops = {
101275 .kind = "caif",
101276 .priv_size = sizeof(struct chnl_net),
101277 .setup = ipcaif_net_setup,
101278diff --git a/net/can/af_can.c b/net/can/af_can.c
101279index 32d710e..93bcf05 100644
101280--- a/net/can/af_can.c
101281+++ b/net/can/af_can.c
101282@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101283 };
101284
101285 /* notifier block for netdevice event */
101286-static struct notifier_block can_netdev_notifier __read_mostly = {
101287+static struct notifier_block can_netdev_notifier = {
101288 .notifier_call = can_notifier,
101289 };
101290
101291diff --git a/net/can/bcm.c b/net/can/bcm.c
101292index ee9ffd9..dfdf3d4 100644
101293--- a/net/can/bcm.c
101294+++ b/net/can/bcm.c
101295@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101296 }
101297
101298 /* create /proc/net/can-bcm directory */
101299- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101300+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101301 return 0;
101302 }
101303
101304diff --git a/net/can/gw.c b/net/can/gw.c
101305index 295f62e..0c3b09e 100644
101306--- a/net/can/gw.c
101307+++ b/net/can/gw.c
101308@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101309 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101310
101311 static HLIST_HEAD(cgw_list);
101312-static struct notifier_block notifier;
101313
101314 static struct kmem_cache *cgw_cache __read_mostly;
101315
101316@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101317 return err;
101318 }
101319
101320+static struct notifier_block notifier = {
101321+ .notifier_call = cgw_notifier
101322+};
101323+
101324 static __init int cgw_module_init(void)
101325 {
101326 /* sanitize given module parameter */
101327@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101328 return -ENOMEM;
101329
101330 /* set notifier */
101331- notifier.notifier_call = cgw_notifier;
101332 register_netdevice_notifier(&notifier);
101333
101334 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
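
The gw.c hunk replaces a runtime assignment of notifier.notifier_call with a static initializer. A structure that is fully initialized at compile time can be placed in read-only data by the constification plugin; one that is written during module init has to stay writable forever. A minimal before/after sketch:

    #include <stdio.h>

    struct notifier_block_model {
        int (*notifier_call)(unsigned long event);
    };

    static int cgw_notifier_model(unsigned long event)
    {
        printf("event %lu\n", event);
        return 0;
    }

    /* After: fully initialized at compile time, so the object can live
     * in a read-only section and never be touched again. */
    static const struct notifier_block_model notifier = {
        .notifier_call = cgw_notifier_model,
    };

    int main(void)
    {
        /* Before the change this would have been a runtime store:
         *     notifier.notifier_call = cgw_notifier_model;
         * which forces the struct to remain writable. */
        notifier.notifier_call(1);
        return 0;
    }
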
101335diff --git a/net/can/proc.c b/net/can/proc.c
101336index 1a19b98..df2b4ec 100644
101337--- a/net/can/proc.c
101338+++ b/net/can/proc.c
101339@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101340 void can_init_proc(void)
101341 {
101342 /* create /proc/net/can directory */
101343- can_dir = proc_mkdir("can", init_net.proc_net);
101344+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101345
101346 if (!can_dir) {
101347 printk(KERN_INFO "can: failed to create /proc/net/can . "
101348diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101349index 33a2f20..371bd09 100644
101350--- a/net/ceph/messenger.c
101351+++ b/net/ceph/messenger.c
101352@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101353 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101354
101355 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101356-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101357+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101358
101359 static struct page *zero_page; /* used in certain error cases */
101360
101361@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101362 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101363 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101364
101365- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101366+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101367 s = addr_str[i];
101368
101369 switch (ss->ss_family) {
101370diff --git a/net/compat.c b/net/compat.c
101371index f7bd286..76ea56a 100644
101372--- a/net/compat.c
101373+++ b/net/compat.c
101374@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101375
101376 #define CMSG_COMPAT_FIRSTHDR(msg) \
101377 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101378- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101379+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101380 (struct compat_cmsghdr __user *)NULL)
101381
101382 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101383 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101384 (ucmlen) <= (unsigned long) \
101385 ((mhdr)->msg_controllen - \
101386- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101387+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101388
101389 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101390 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101391 {
101392 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101393- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101394+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101395 msg->msg_controllen)
101396 return NULL;
101397 return (struct compat_cmsghdr __user *)ptr;
101398@@ -203,7 +203,7 @@ Efault:
101399
101400 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101401 {
101402- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101403+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101404 struct compat_cmsghdr cmhdr;
101405 struct compat_timeval ctv;
101406 struct compat_timespec cts[3];
101407@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101408
101409 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101410 {
101411- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101412+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101413 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101414 int fdnum = scm->fp->count;
101415 struct file **fp = scm->fp->fp;
101416@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101417 return -EFAULT;
101418 old_fs = get_fs();
101419 set_fs(KERNEL_DS);
101420- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101421+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101422 set_fs(old_fs);
101423
101424 return err;
101425@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101426 len = sizeof(ktime);
101427 old_fs = get_fs();
101428 set_fs(KERNEL_DS);
101429- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101430+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101431 set_fs(old_fs);
101432
101433 if (!err) {
101434@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101435 case MCAST_JOIN_GROUP:
101436 case MCAST_LEAVE_GROUP:
101437 {
101438- struct compat_group_req __user *gr32 = (void *)optval;
101439+ struct compat_group_req __user *gr32 = (void __user *)optval;
101440 struct group_req __user *kgr =
101441 compat_alloc_user_space(sizeof(struct group_req));
101442 u32 interface;
101443@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101444 case MCAST_BLOCK_SOURCE:
101445 case MCAST_UNBLOCK_SOURCE:
101446 {
101447- struct compat_group_source_req __user *gsr32 = (void *)optval;
101448+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101449 struct group_source_req __user *kgsr = compat_alloc_user_space(
101450 sizeof(struct group_source_req));
101451 u32 interface;
101452@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101453 }
101454 case MCAST_MSFILTER:
101455 {
101456- struct compat_group_filter __user *gf32 = (void *)optval;
101457+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101458 struct group_filter __user *kgf;
101459 u32 interface, fmode, numsrc;
101460
101461@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101462 char __user *optval, int __user *optlen,
101463 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101464 {
101465- struct compat_group_filter __user *gf32 = (void *)optval;
101466+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101467 struct group_filter __user *kgf;
101468 int __user *koptlen;
101469 u32 interface, fmode, numsrc;
101470@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101471
101472 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101473 return -EINVAL;
101474- if (copy_from_user(a, args, nas[call]))
101475+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101476 return -EFAULT;
101477 a0 = a[0];
101478 a1 = a[1];
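
The __force_user/__force_kernel casts throughout net/compat.c are sparse address-space annotations rather than runtime conversions: grsecurity builds treat user and kernel pointers as distinct address spaces, so even the deliberate mixing inside the set_fs(KERNEL_DS) windows above has to be spelled out. A hedged illustration of how such markers are typically defined; the macros below expand to nothing outside a sparse run:

    #include <stdio.h>

    /* Under sparse (__CHECKER__), the attribute tags the pointer with an
     * address space; under a normal compile it vanishes. */
    #ifdef __CHECKER__
    # define __user_model        __attribute__((noderef, address_space(1)))
    # define __force_user_model  __attribute__((force, address_space(1)))
    #else
    # define __user_model
    # define __force_user_model
    #endif

    /* A function that only accepts user-space pointers. */
    static size_t copy_len(const char __user_model *src, size_t n)
    {
        (void)src;
        return n;  /* real code would use copy_from_user() here */
    }

    int main(void)
    {
        char kbuf[8] = "hi";

        /* Passing a kernel pointer where a __user one is expected is a
         * sparse warning; the explicit cast documents the intent, just
         * as the (char __force_user *) casts do in the patch. */
        size_t n = copy_len((const char __force_user_model *)kbuf, 2);
        printf("%zu bytes\n", n);
        return 0;
    }
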
101479diff --git a/net/core/datagram.c b/net/core/datagram.c
101480index df493d6..1145766 100644
101481--- a/net/core/datagram.c
101482+++ b/net/core/datagram.c
101483@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101484 }
101485
101486 kfree_skb(skb);
101487- atomic_inc(&sk->sk_drops);
101488+ atomic_inc_unchecked(&sk->sk_drops);
101489 sk_mem_reclaim_partial(sk);
101490
101491 return err;
101492diff --git a/net/core/dev.c b/net/core/dev.c
101493index 4ff46f8..e877e78 100644
101494--- a/net/core/dev.c
101495+++ b/net/core/dev.c
101496@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101497 {
101498 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101499 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101500- atomic_long_inc(&dev->rx_dropped);
101501+ atomic_long_inc_unchecked(&dev->rx_dropped);
101502 kfree_skb(skb);
101503 return NET_RX_DROP;
101504 }
101505 }
101506
101507 if (unlikely(!is_skb_forwardable(dev, skb))) {
101508- atomic_long_inc(&dev->rx_dropped);
101509+ atomic_long_inc_unchecked(&dev->rx_dropped);
101510 kfree_skb(skb);
101511 return NET_RX_DROP;
101512 }
101513@@ -2958,7 +2958,7 @@ recursion_alert:
101514 drop:
101515 rcu_read_unlock_bh();
101516
101517- atomic_long_inc(&dev->tx_dropped);
101518+ atomic_long_inc_unchecked(&dev->tx_dropped);
101519 kfree_skb_list(skb);
101520 return rc;
101521 out:
101522@@ -3301,7 +3301,7 @@ enqueue:
101523
101524 local_irq_restore(flags);
101525
101526- atomic_long_inc(&skb->dev->rx_dropped);
101527+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101528 kfree_skb(skb);
101529 return NET_RX_DROP;
101530 }
101531@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101532 }
101533 EXPORT_SYMBOL(netif_rx_ni);
101534
101535-static void net_tx_action(struct softirq_action *h)
101536+static __latent_entropy void net_tx_action(void)
101537 {
101538 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101539
101540@@ -3711,7 +3711,7 @@ ncls:
101541 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101542 } else {
101543 drop:
101544- atomic_long_inc(&skb->dev->rx_dropped);
101545+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101546 kfree_skb(skb);
101547 /* Jamal, now you will not be able to escape explaining
101548 * to me how you were going to use this. :-)
101549@@ -4599,7 +4599,7 @@ out_unlock:
101550 return work;
101551 }
101552
101553-static void net_rx_action(struct softirq_action *h)
101554+static __latent_entropy void net_rx_action(void)
101555 {
101556 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101557 unsigned long time_limit = jiffies + 2;
101558@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101559 } else {
101560 netdev_stats_to_stats64(storage, &dev->stats);
101561 }
101562- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101563- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101564+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101565+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101566 return storage;
101567 }
101568 EXPORT_SYMBOL(dev_get_stats);
101569diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101570index b94b1d2..da3ed7c 100644
101571--- a/net/core/dev_ioctl.c
101572+++ b/net/core/dev_ioctl.c
101573@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101574 no_module = !dev;
101575 if (no_module && capable(CAP_NET_ADMIN))
101576 no_module = request_module("netdev-%s", name);
101577- if (no_module && capable(CAP_SYS_MODULE))
101578+ if (no_module && capable(CAP_SYS_MODULE)) {
101579+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101580+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101581+#else
101582 request_module("%s", name);
101583+#endif
101584+ }
101585 }
101586 EXPORT_SYMBOL(dev_load);
101587
101588diff --git a/net/core/filter.c b/net/core/filter.c
101589index ec9baea..dd6195d 100644
101590--- a/net/core/filter.c
101591+++ b/net/core/filter.c
101592@@ -533,7 +533,11 @@ do_pass:
101593
101594 /* Unknown instruction. */
101595 default:
101596- goto err;
101597+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
101598+ fp->code, fp->jt, fp->jf, fp->k);
101599+ kfree(addrs);
101600+ BUG();
101601+ return -EINVAL;
101602 }
101603
101604 insn++;
101605@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101606 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101607 int pc, ret = 0;
101608
101609- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101610+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101611
101612 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101613 if (!masks)
101614@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101615 if (!fp)
101616 return -ENOMEM;
101617
101618- memcpy(fp->insns, fprog->filter, fsize);
101619+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101620
101621 fp->len = fprog->len;
101622 /* Since unattached filters are not copied back to user
101623diff --git a/net/core/flow.c b/net/core/flow.c
101624index 1033725..340f65d 100644
101625--- a/net/core/flow.c
101626+++ b/net/core/flow.c
101627@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101628 static int flow_entry_valid(struct flow_cache_entry *fle,
101629 struct netns_xfrm *xfrm)
101630 {
101631- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101632+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101633 return 0;
101634 if (fle->object && !fle->object->ops->check(fle->object))
101635 return 0;
101636@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101637 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101638 fcp->hash_count++;
101639 }
101640- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101641+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101642 flo = fle->object;
101643 if (!flo)
101644 goto ret_object;
101645@@ -263,7 +263,7 @@ nocache:
101646 }
101647 flo = resolver(net, key, family, dir, flo, ctx);
101648 if (fle) {
101649- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101650+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101651 if (!IS_ERR(flo))
101652 fle->object = flo;
101653 else
101654diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101655index 8d614c9..55752ea 100644
101656--- a/net/core/neighbour.c
101657+++ b/net/core/neighbour.c
101658@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101659 void __user *buffer, size_t *lenp, loff_t *ppos)
101660 {
101661 int size, ret;
101662- struct ctl_table tmp = *ctl;
101663+ ctl_table_no_const tmp = *ctl;
101664
101665 tmp.extra1 = &zero;
101666 tmp.extra2 = &unres_qlen_max;
101667@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101668 void __user *buffer,
101669 size_t *lenp, loff_t *ppos)
101670 {
101671- struct ctl_table tmp = *ctl;
101672+ ctl_table_no_const tmp = *ctl;
101673 int ret;
101674
101675 tmp.extra1 = &zero;
101676diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101677index 2bf8329..2eb1423 100644
101678--- a/net/core/net-procfs.c
101679+++ b/net/core/net-procfs.c
101680@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101681 struct rtnl_link_stats64 temp;
101682 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101683
101684- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101685+ if (gr_proc_is_restricted())
101686+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101687+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101688+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101689+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101690+ else
101691+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101692 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101693 dev->name, stats->rx_bytes, stats->rx_packets,
101694 stats->rx_errors,
101695@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101696 return 0;
101697 }
101698
101699-static const struct seq_operations dev_seq_ops = {
101700+const struct seq_operations dev_seq_ops = {
101701 .start = dev_seq_start,
101702 .next = dev_seq_next,
101703 .stop = dev_seq_stop,
101704@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101705
101706 static int softnet_seq_open(struct inode *inode, struct file *file)
101707 {
101708- return seq_open(file, &softnet_seq_ops);
101709+ return seq_open_restrict(file, &softnet_seq_ops);
101710 }
101711
101712 static const struct file_operations softnet_seq_fops = {
101713@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101714 else
101715 seq_printf(seq, "%04x", ntohs(pt->type));
101716
101717+#ifdef CONFIG_GRKERNSEC_HIDESYM
101718+ seq_printf(seq, " %-8s %pf\n",
101719+ pt->dev ? pt->dev->name : "", NULL);
101720+#else
101721 seq_printf(seq, " %-8s %pf\n",
101722 pt->dev ? pt->dev->name : "", pt->func);
101723+#endif
101724 }
101725
101726 return 0;
101727diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101728index 9993412..2a4672b 100644
101729--- a/net/core/net-sysfs.c
101730+++ b/net/core/net-sysfs.c
101731@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101732 {
101733 struct net_device *netdev = to_net_dev(dev);
101734 return sprintf(buf, fmt_dec,
101735- atomic_read(&netdev->carrier_changes));
101736+ atomic_read_unchecked(&netdev->carrier_changes));
101737 }
101738 static DEVICE_ATTR_RO(carrier_changes);
101739
101740diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101741index ce780c7..6d296b3 100644
101742--- a/net/core/net_namespace.c
101743+++ b/net/core/net_namespace.c
101744@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101745 int error;
101746 LIST_HEAD(net_exit_list);
101747
101748- list_add_tail(&ops->list, list);
101749+ pax_list_add_tail((struct list_head *)&ops->list, list);
101750 if (ops->init || (ops->id && ops->size)) {
101751 for_each_net(net) {
101752 error = ops_init(ops, net);
101753@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101754
101755 out_undo:
101756 /* If I have an error cleanup all namespaces I initialized */
101757- list_del(&ops->list);
101758+ pax_list_del((struct list_head *)&ops->list);
101759 ops_exit_list(ops, &net_exit_list);
101760 ops_free_list(ops, &net_exit_list);
101761 return error;
101762@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101763 struct net *net;
101764 LIST_HEAD(net_exit_list);
101765
101766- list_del(&ops->list);
101767+ pax_list_del((struct list_head *)&ops->list);
101768 for_each_net(net)
101769 list_add_tail(&net->exit_list, &net_exit_list);
101770 ops_exit_list(ops, &net_exit_list);
101771@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101772 mutex_lock(&net_mutex);
101773 error = register_pernet_operations(&pernet_list, ops);
101774 if (!error && (first_device == &pernet_list))
101775- first_device = &ops->list;
101776+ first_device = (struct list_head *)&ops->list;
101777 mutex_unlock(&net_mutex);
101778 return error;
101779 }
101780diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101781index e0ad5d1..04fa7f7 100644
101782--- a/net/core/netpoll.c
101783+++ b/net/core/netpoll.c
101784@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101785 struct udphdr *udph;
101786 struct iphdr *iph;
101787 struct ethhdr *eth;
101788- static atomic_t ip_ident;
101789+ static atomic_unchecked_t ip_ident;
101790 struct ipv6hdr *ip6h;
101791
101792 udp_len = len + sizeof(*udph);
101793@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101794 put_unaligned(0x45, (unsigned char *)iph);
101795 iph->tos = 0;
101796 put_unaligned(htons(ip_len), &(iph->tot_len));
101797- iph->id = htons(atomic_inc_return(&ip_ident));
101798+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101799 iph->frag_off = 0;
101800 iph->ttl = 64;
101801 iph->protocol = IPPROTO_UDP;
101802diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101803index 352d183..1bddfaf 100644
101804--- a/net/core/pktgen.c
101805+++ b/net/core/pktgen.c
101806@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
101807 pn->net = net;
101808 INIT_LIST_HEAD(&pn->pktgen_threads);
101809 pn->pktgen_exiting = false;
101810- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101811+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101812 if (!pn->proc_dir) {
101813 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101814 return -ENODEV;
101815diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101816index 76ec6c5..9cfb81c 100644
101817--- a/net/core/rtnetlink.c
101818+++ b/net/core/rtnetlink.c
101819@@ -60,7 +60,7 @@ struct rtnl_link {
101820 rtnl_doit_func doit;
101821 rtnl_dumpit_func dumpit;
101822 rtnl_calcit_func calcit;
101823-};
101824+} __no_const;
101825
101826 static DEFINE_MUTEX(rtnl_mutex);
101827
101828@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101829 * to use the ops for creating device. So do not
101830 * fill up dellink as well. That disables rtnl_dellink.
101831 */
101832- if (ops->setup && !ops->dellink)
101833- ops->dellink = unregister_netdevice_queue;
101834+ if (ops->setup && !ops->dellink) {
101835+ pax_open_kernel();
101836+ *(void **)&ops->dellink = unregister_netdevice_queue;
101837+ pax_close_kernel();
101838+ }
101839
101840- list_add_tail(&ops->list, &link_ops);
101841+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101842 return 0;
101843 }
101844 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101845@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101846 for_each_net(net) {
101847 __rtnl_kill_links(net, ops);
101848 }
101849- list_del(&ops->list);
101850+ pax_list_del((struct list_head *)&ops->list);
101851 }
101852 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101853
101854@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101855 (dev->ifalias &&
101856 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101857 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101858- atomic_read(&dev->carrier_changes)))
101859+ atomic_read_unchecked(&dev->carrier_changes)))
101860 goto nla_put_failure;
101861
101862 if (1) {
101863@@ -2094,6 +2097,10 @@ replay:
101864 if (IS_ERR(dest_net))
101865 return PTR_ERR(dest_net);
101866
101867+ err = -EPERM;
101868+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101869+ goto out;
101870+
101871 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101872 if (IS_ERR(dev)) {
101873 err = PTR_ERR(dev);
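
With the rtnl_link ops constified, the one legitimate late write (defaulting ops->dellink to unregister_netdevice_queue) must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection on the current CPU. On x86 this is commonly described as toggling CR0.WP; the sketch below models only the discipline, not the mechanism, and every name in it is invented:

    #include <stdio.h>

    /* Schematic stand-ins: the real pax_open_kernel() clears CR0.WP with
     * preemption disabled, and pax_close_kernel() restores it. */
    static int wp_enabled = 1;

    static void pax_open_kernel_model(void)  { wp_enabled = 0; }
    static void pax_close_kernel_model(void) { wp_enabled = 1; }

    struct link_ops_model { void (*dellink)(const char *name); };

    static void unregister_model(const char *name)
    {
        printf("unregister %s\n", name);
    }

    static void patch_ops(struct link_ops_model *ops)
    {
        if (wp_enabled) {
            puts("write to read-only ops would fault here");
            return;
        }
        ops->dellink = unregister_model;  /* the one sanctioned write */
    }

    int main(void)
    {
        static struct link_ops_model ops;  /* notionally read-only */

        patch_ops(&ops);                   /* blocked: WP still set      */
        pax_open_kernel_model();
        patch_ops(&ops);                   /* allowed inside the window  */
        pax_close_kernel_model();
        ops.dellink("demo0");
        return 0;
    }
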
101874diff --git a/net/core/scm.c b/net/core/scm.c
101875index 3b6899b..cf36238 100644
101876--- a/net/core/scm.c
101877+++ b/net/core/scm.c
101878@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101879 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101880 {
101881 struct cmsghdr __user *cm
101882- = (__force struct cmsghdr __user *)msg->msg_control;
101883+ = (struct cmsghdr __force_user *)msg->msg_control;
101884 struct cmsghdr cmhdr;
101885 int cmlen = CMSG_LEN(len);
101886 int err;
101887@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101888 err = -EFAULT;
101889 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101890 goto out;
101891- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101892+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101893 goto out;
101894 cmlen = CMSG_SPACE(len);
101895 if (msg->msg_controllen < cmlen)
101896@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101897 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101898 {
101899 struct cmsghdr __user *cm
101900- = (__force struct cmsghdr __user*)msg->msg_control;
101901+ = (struct cmsghdr __force_user *)msg->msg_control;
101902
101903 int fdmax = 0;
101904 int fdnum = scm->fp->count;
101905@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101906 if (fdnum < fdmax)
101907 fdmax = fdnum;
101908
101909- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101910+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101911 i++, cmfptr++)
101912 {
101913 struct socket *sock;
101914diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101915index 62c67be..01893a0a 100644
101916--- a/net/core/skbuff.c
101917+++ b/net/core/skbuff.c
101918@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
101919 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101920 int len, __wsum csum)
101921 {
101922- const struct skb_checksum_ops ops = {
101923+ static const struct skb_checksum_ops ops = {
101924 .update = csum_partial_ext,
101925 .combine = csum_block_add_ext,
101926 };
101927@@ -3363,12 +3363,14 @@ void __init skb_init(void)
101928 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101929 sizeof(struct sk_buff),
101930 0,
101931- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101932+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101933+ SLAB_NO_SANITIZE,
101934 NULL);
101935 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101936 sizeof(struct sk_buff_fclones),
101937 0,
101938- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101939+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101940+ SLAB_NO_SANITIZE,
101941 NULL);
101942 }
101943
101944diff --git a/net/core/sock.c b/net/core/sock.c
101945index 1c7a33d..a3817e2 100644
101946--- a/net/core/sock.c
101947+++ b/net/core/sock.c
101948@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101949 struct sk_buff_head *list = &sk->sk_receive_queue;
101950
101951 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101952- atomic_inc(&sk->sk_drops);
101953+ atomic_inc_unchecked(&sk->sk_drops);
101954 trace_sock_rcvqueue_full(sk, skb);
101955 return -ENOMEM;
101956 }
101957@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101958 return err;
101959
101960 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101961- atomic_inc(&sk->sk_drops);
101962+ atomic_inc_unchecked(&sk->sk_drops);
101963 return -ENOBUFS;
101964 }
101965
101966@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101967 skb_dst_force(skb);
101968
101969 spin_lock_irqsave(&list->lock, flags);
101970- skb->dropcount = atomic_read(&sk->sk_drops);
101971+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101972 __skb_queue_tail(list, skb);
101973 spin_unlock_irqrestore(&list->lock, flags);
101974
101975@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101976 skb->dev = NULL;
101977
101978 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101979- atomic_inc(&sk->sk_drops);
101980+ atomic_inc_unchecked(&sk->sk_drops);
101981 goto discard_and_relse;
101982 }
101983 if (nested)
101984@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101985 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101986 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101987 bh_unlock_sock(sk);
101988- atomic_inc(&sk->sk_drops);
101989+ atomic_inc_unchecked(&sk->sk_drops);
101990 goto discard_and_relse;
101991 }
101992
101993@@ -888,6 +888,7 @@ set_rcvbuf:
101994 }
101995 break;
101996
101997+#ifndef CONFIG_GRKERNSEC_BPF_HARDEN
101998 case SO_ATTACH_BPF:
101999 ret = -EINVAL;
102000 if (optlen == sizeof(u32)) {
102001@@ -900,7 +901,7 @@ set_rcvbuf:
102002 ret = sk_attach_bpf(ufd, sk);
102003 }
102004 break;
102005-
102006+#endif
102007 case SO_DETACH_FILTER:
102008 ret = sk_detach_filter(sk);
102009 break;
102010@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102011 struct timeval tm;
102012 } v;
102013
102014- int lv = sizeof(int);
102015- int len;
102016+ unsigned int lv = sizeof(int);
102017+ unsigned int len;
102018
102019 if (get_user(len, optlen))
102020 return -EFAULT;
102021- if (len < 0)
102022+ if (len > INT_MAX)
102023 return -EINVAL;
102024
102025 memset(&v, 0, sizeof(v));
102026@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102027
102028 case SO_PEERNAME:
102029 {
102030- char address[128];
102031+ char address[_K_SS_MAXSIZE];
102032
102033 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102034 return -ENOTCONN;
102035- if (lv < len)
102036+ if (lv < len || sizeof address < len)
102037 return -EINVAL;
102038 if (copy_to_user(optval, address, len))
102039 return -EFAULT;
102040@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102041
102042 if (len > lv)
102043 len = lv;
102044- if (copy_to_user(optval, &v, len))
102045+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102046 return -EFAULT;
102047 lenout:
102048 if (put_user(len, optlen))
102049@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102050 */
102051 smp_wmb();
102052 atomic_set(&sk->sk_refcnt, 1);
102053- atomic_set(&sk->sk_drops, 0);
102054+ atomic_set_unchecked(&sk->sk_drops, 0);
102055 }
102056 EXPORT_SYMBOL(sock_init_data);
102057
102058@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102059 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102060 int level, int type)
102061 {
102062+ struct sock_extended_err ee;
102063 struct sock_exterr_skb *serr;
102064 struct sk_buff *skb;
102065 int copied, err;
102066@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102067 sock_recv_timestamp(msg, sk, skb);
102068
102069 serr = SKB_EXT_ERR(skb);
102070- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102071+ ee = serr->ee;
102072+ put_cmsg(msg, level, type, sizeof ee, &ee);
102073
102074 msg->msg_flags |= MSG_ERRQUEUE;
102075 err = copied;
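
The sock_getsockopt() hunks combine two defenses: the user-supplied length is kept unsigned, so a "negative" request surfaces as an absurdly large one and is rejected, and the final copy refuses any len exceeding the kernel value it would expose. A compact userspace model of the fixed control flow, with types and limits simplified:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    static int getsockopt_model(void *optval, unsigned int len_in,
                                unsigned int *optlen_out)
    {
        int v = 42;                     /* the option value */
        unsigned int lv = sizeof(int);  /* its true size    */
        unsigned int len = len_in;

        if (len > INT_MAX)              /* was: if (len < 0) on an int */
            return -EINVAL;
        if (len > lv)
            len = lv;
        if (len > sizeof(v))            /* the added hard stop */
            return -EFAULT;
        memcpy(optval, &v, len);        /* copy_to_user() stand-in */
        *optlen_out = len;
        return 0;
    }

    int main(void)
    {
        int out = 0;
        unsigned int outlen = 0;

        printf("normal:   %d\n", getsockopt_model(&out, sizeof(out), &outlen));
        /* (unsigned int)-1: rejected instead of treated as valid. */
        printf("negative: %d\n",
               getsockopt_model(&out, (unsigned int)-1, &outlen));
        printf("value %d, len %u\n", out, outlen);
        return 0;
    }
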
102076diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102077index ad704c7..ca48aff 100644
102078--- a/net/core/sock_diag.c
102079+++ b/net/core/sock_diag.c
102080@@ -9,26 +9,33 @@
102081 #include <linux/inet_diag.h>
102082 #include <linux/sock_diag.h>
102083
102084-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102085+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102086 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102087 static DEFINE_MUTEX(sock_diag_table_mutex);
102088
102089 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102090 {
102091+#ifndef CONFIG_GRKERNSEC_HIDESYM
102092 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102093 cookie[1] != INET_DIAG_NOCOOKIE) &&
102094 ((u32)(unsigned long)sk != cookie[0] ||
102095 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102096 return -ESTALE;
102097 else
102098+#endif
102099 return 0;
102100 }
102101 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102102
102103 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102104 {
102105+#ifdef CONFIG_GRKERNSEC_HIDESYM
102106+ cookie[0] = 0;
102107+ cookie[1] = 0;
102108+#else
102109 cookie[0] = (u32)(unsigned long)sk;
102110 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102111+#endif
102112 }
102113 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102114
102115@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102116 mutex_lock(&sock_diag_table_mutex);
102117 if (sock_diag_handlers[hndl->family])
102118 err = -EBUSY;
102119- else
102120+ else {
102121+ pax_open_kernel();
102122 sock_diag_handlers[hndl->family] = hndl;
102123+ pax_close_kernel();
102124+ }
102125 mutex_unlock(&sock_diag_table_mutex);
102126
102127 return err;
102128@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102129
102130 mutex_lock(&sock_diag_table_mutex);
102131 BUG_ON(sock_diag_handlers[family] != hnld);
102132+ pax_open_kernel();
102133 sock_diag_handlers[family] = NULL;
102134+ pax_close_kernel();
102135 mutex_unlock(&sock_diag_table_mutex);
102136 }
102137 EXPORT_SYMBOL_GPL(sock_diag_unregister);
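
The HIDESYM branch in sock_diag_save_cookie() stops the socket-diagnostics cookie from encoding the socket's kernel address, which would otherwise hand an infoleak to any user able to issue diag requests; the matching change in sock_diag_check_cookie() then accepts any cookie. A userspace model of both behaviours:

    #include <stdio.h>

    /* Model of the HIDESYM behaviour: instead of deriving the diag
     * cookie from the socket's kernel address (an infoleak), report
     * zeros and skip the address comparison on lookup. */
    static void save_cookie(const void *sk, unsigned int cookie[2],
                            int hidesym)
    {
        if (hidesym) {
            cookie[0] = 0;
            cookie[1] = 0;
        } else {
            unsigned long p = (unsigned long)sk;
            cookie[0] = (unsigned int)p;
            cookie[1] = (unsigned int)((p >> 31) >> 1);
        }
    }

    int main(void)
    {
        int dummy;
        unsigned int c[2];

        save_cookie(&dummy, c, 0);
        printf("leaky cookie:  %08x%08x\n", c[1], c[0]);
        save_cookie(&dummy, c, 1);
        printf("hidden cookie: %08x%08x\n", c[1], c[0]);
        return 0;
    }
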
102138diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102139index bbb1d5a..754e2e5 100644
102140--- a/net/core/sysctl_net_core.c
102141+++ b/net/core/sysctl_net_core.c
102142@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102143 {
102144 unsigned int orig_size, size;
102145 int ret, i;
102146- struct ctl_table tmp = {
102147+ ctl_table_no_const tmp = {
102148 .data = &size,
102149 .maxlen = sizeof(size),
102150 .mode = table->mode
102151@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102152 void __user *buffer, size_t *lenp, loff_t *ppos)
102153 {
102154 char id[IFNAMSIZ];
102155- struct ctl_table tbl = {
102156+ ctl_table_no_const tbl = {
102157 .data = id,
102158 .maxlen = IFNAMSIZ,
102159 };
102160@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102161 static int proc_do_rss_key(struct ctl_table *table, int write,
102162 void __user *buffer, size_t *lenp, loff_t *ppos)
102163 {
102164- struct ctl_table fake_table;
102165+ ctl_table_no_const fake_table;
102166 char buf[NETDEV_RSS_KEY_LEN * 3];
102167
102168 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102169@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102170 .mode = 0444,
102171 .proc_handler = proc_do_rss_key,
102172 },
102173-#ifdef CONFIG_BPF_JIT
102174+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102175 {
102176 .procname = "bpf_jit_enable",
102177 .data = &bpf_jit_enable,
102178@@ -402,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
102179
102180 static __net_init int sysctl_core_net_init(struct net *net)
102181 {
102182- struct ctl_table *tbl;
102183+ ctl_table_no_const *tbl = NULL;
102184
102185 net->core.sysctl_somaxconn = SOMAXCONN;
102186
102187- tbl = netns_core_table;
102188 if (!net_eq(net, &init_net)) {
102189- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102190+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102191 if (tbl == NULL)
102192 goto err_dup;
102193
102194@@ -418,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102195 if (net->user_ns != &init_user_ns) {
102196 tbl[0].procname = NULL;
102197 }
102198- }
102199-
102200- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102201+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102202+ } else
102203+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102204 if (net->core.sysctl_hdr == NULL)
102205 goto err_reg;
102206
102207 return 0;
102208
102209 err_reg:
102210- if (tbl != netns_core_table)
102211- kfree(tbl);
102212+ kfree(tbl);
102213 err_dup:
102214 return -ENOMEM;
102215 }
102216@@ -443,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102217 kfree(tbl);
102218 }
102219
102220-static __net_initdata struct pernet_operations sysctl_core_ops = {
102221+static __net_initconst struct pernet_operations sysctl_core_ops = {
102222 .init = sysctl_core_net_init,
102223 .exit = sysctl_core_net_exit,
102224 };
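
sysctl_core_net_init() is restructured into the pattern repeated across this patch (see also the ieee802154 and devinet hunks below): the init namespace registers the pristine table directly, while every other namespace registers a kmemdup() copy, typed ctl_table_no_const, that it may legally edit. The template table itself can then be constified. A sketch of the flow with invented names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ctl_entry { const char *procname; int mode; };

    static const struct ctl_entry template_table[] = {
        { "somaxconn", 0644 },
        { NULL, 0 },
    };

    static int net_init(int is_init_ns, int unprivileged)
    {
        struct ctl_entry *tbl = NULL;  /* the writable duplicate */

        if (!is_init_ns) {
            tbl = malloc(sizeof(template_table));
            if (!tbl)
                return -1;
            memcpy(tbl, template_table, sizeof(template_table));
            if (unprivileged)
                tbl[0].procname = NULL;  /* hide from this ns */
            printf("register private copy (%s)\n",
                   tbl[0].procname ? tbl[0].procname : "hidden");
        } else {
            /* init ns uses the read-only template directly */
            printf("register template (%s)\n", template_table[0].procname);
        }
        free(tbl);
        return 0;
    }

    int main(void)
    {
        net_init(1, 0);
        net_init(0, 0);
        net_init(0, 1);
        return 0;
    }
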
102225diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102226index 8102286..a0c2755 100644
102227--- a/net/decnet/af_decnet.c
102228+++ b/net/decnet/af_decnet.c
102229@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102230 .sysctl_rmem = sysctl_decnet_rmem,
102231 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102232 .obj_size = sizeof(struct dn_sock),
102233+ .slab_flags = SLAB_USERCOPY,
102234 };
102235
102236 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102237diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102238index 4400da7..3429972 100644
102239--- a/net/decnet/dn_dev.c
102240+++ b/net/decnet/dn_dev.c
102241@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102242 .extra1 = &min_t3,
102243 .extra2 = &max_t3
102244 },
102245- {0}
102246+ { }
102247 },
102248 };
102249
102250diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102251index 5325b54..a0d4d69 100644
102252--- a/net/decnet/sysctl_net_decnet.c
102253+++ b/net/decnet/sysctl_net_decnet.c
102254@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102255
102256 if (len > *lenp) len = *lenp;
102257
102258- if (copy_to_user(buffer, addr, len))
102259+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102260 return -EFAULT;
102261
102262 *lenp = len;
102263@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102264
102265 if (len > *lenp) len = *lenp;
102266
102267- if (copy_to_user(buffer, devname, len))
102268+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102269 return -EFAULT;
102270
102271 *lenp = len;
102272diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102273index a2c7e4c..3dc9f67 100644
102274--- a/net/hsr/hsr_netlink.c
102275+++ b/net/hsr/hsr_netlink.c
102276@@ -102,7 +102,7 @@ nla_put_failure:
102277 return -EMSGSIZE;
102278 }
102279
102280-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102281+static struct rtnl_link_ops hsr_link_ops = {
102282 .kind = "hsr",
102283 .maxtype = IFLA_HSR_MAX,
102284 .policy = hsr_policy,
102285diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102286index 27eaa65..7083217 100644
102287--- a/net/ieee802154/6lowpan_rtnl.c
102288+++ b/net/ieee802154/6lowpan_rtnl.c
102289@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102290 dev_put(real_dev);
102291 }
102292
102293-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102294+static struct rtnl_link_ops lowpan_link_ops = {
102295 .kind = "lowpan",
102296 .priv_size = sizeof(struct lowpan_dev_info),
102297 .setup = lowpan_setup,
102298diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102299index 9d980ed..7d01e12 100644
102300--- a/net/ieee802154/reassembly.c
102301+++ b/net/ieee802154/reassembly.c
102302@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102303
102304 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102305 {
102306- struct ctl_table *table;
102307+ ctl_table_no_const *table = NULL;
102308 struct ctl_table_header *hdr;
102309 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102310 net_ieee802154_lowpan(net);
102311
102312- table = lowpan_frags_ns_ctl_table;
102313 if (!net_eq(net, &init_net)) {
102314- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102315+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102316 GFP_KERNEL);
102317 if (table == NULL)
102318 goto err_alloc;
102319@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102320 /* Don't export sysctls to unprivileged users */
102321 if (net->user_ns != &init_user_ns)
102322 table[0].procname = NULL;
102323- }
102324-
102325- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102326+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102327+ } else
102328+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102329 if (hdr == NULL)
102330 goto err_reg;
102331
102332@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102333 return 0;
102334
102335 err_reg:
102336- if (!net_eq(net, &init_net))
102337- kfree(table);
102338+ kfree(table);
102339 err_alloc:
102340 return -ENOMEM;
102341 }
102342diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102343index a44773c..a6ae415 100644
102344--- a/net/ipv4/af_inet.c
102345+++ b/net/ipv4/af_inet.c
102346@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102347 return ip_recv_error(sk, msg, len, addr_len);
102348 #if IS_ENABLED(CONFIG_IPV6)
102349 if (sk->sk_family == AF_INET6)
102350- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102351+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102352 #endif
102353 return -EINVAL;
102354 }
102355diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102356index 214882e..ec032f6 100644
102357--- a/net/ipv4/devinet.c
102358+++ b/net/ipv4/devinet.c
102359@@ -69,7 +69,8 @@
102360
102361 static struct ipv4_devconf ipv4_devconf = {
102362 .data = {
102363- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102364+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102365+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102366 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102367 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102368 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102369@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102370
102371 static struct ipv4_devconf ipv4_devconf_dflt = {
102372 .data = {
102373- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102374+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102375+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102376 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102377 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102378 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102379@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102380 idx = 0;
102381 head = &net->dev_index_head[h];
102382 rcu_read_lock();
102383- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102384+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102385 net->dev_base_seq;
102386 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102387 if (idx < s_idx)
102388@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102389 idx = 0;
102390 head = &net->dev_index_head[h];
102391 rcu_read_lock();
102392- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102393+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102394 net->dev_base_seq;
102395 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102396 if (idx < s_idx)
102397@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102398 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102399 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102400
102401-static struct devinet_sysctl_table {
102402+static const struct devinet_sysctl_table {
102403 struct ctl_table_header *sysctl_header;
102404 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102405 } devinet_sysctl = {
102406@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102407 int err;
102408 struct ipv4_devconf *all, *dflt;
102409 #ifdef CONFIG_SYSCTL
102410- struct ctl_table *tbl = ctl_forward_entry;
102411+ ctl_table_no_const *tbl = NULL;
102412 struct ctl_table_header *forw_hdr;
102413 #endif
102414
102415@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102416 goto err_alloc_dflt;
102417
102418 #ifdef CONFIG_SYSCTL
102419- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102420+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102421 if (tbl == NULL)
102422 goto err_alloc_ctl;
102423
102424@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102425 goto err_reg_dflt;
102426
102427 err = -ENOMEM;
102428- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102429+ if (!net_eq(net, &init_net))
102430+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102431+ else
102432+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102433 if (forw_hdr == NULL)
102434 goto err_reg_ctl;
102435 net->ipv4.forw_hdr = forw_hdr;
102436@@ -2287,8 +2292,7 @@ err_reg_ctl:
102437 err_reg_dflt:
102438 __devinet_sysctl_unregister(all);
102439 err_reg_all:
102440- if (tbl != ctl_forward_entry)
102441- kfree(tbl);
102442+ kfree(tbl);
102443 err_alloc_ctl:
102444 #endif
102445 if (dflt != &ipv4_devconf_dflt)
102446diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102447index 23104a3..9f5570b 100644
102448--- a/net/ipv4/fib_frontend.c
102449+++ b/net/ipv4/fib_frontend.c
102450@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102451 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102452 fib_sync_up(dev);
102453 #endif
102454- atomic_inc(&net->ipv4.dev_addr_genid);
102455+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102456 rt_cache_flush(dev_net(dev));
102457 break;
102458 case NETDEV_DOWN:
102459 fib_del_ifaddr(ifa, NULL);
102460- atomic_inc(&net->ipv4.dev_addr_genid);
102461+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102462 if (ifa->ifa_dev->ifa_list == NULL) {
102463 /* Last address was deleted from this interface.
102464 * Disable IP.
102465@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102466 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102467 fib_sync_up(dev);
102468 #endif
102469- atomic_inc(&net->ipv4.dev_addr_genid);
102470+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102471 rt_cache_flush(net);
102472 break;
102473 case NETDEV_DOWN:
102474diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102475index f99f41b..1879da9 100644
102476--- a/net/ipv4/fib_semantics.c
102477+++ b/net/ipv4/fib_semantics.c
102478@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102479 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102480 nh->nh_gw,
102481 nh->nh_parent->fib_scope);
102482- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102483+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102484
102485 return nh->nh_saddr;
102486 }
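
The atomic_read_unchecked()/atomic_inc_unchecked() conversions in the devinet, fib and inetpeer hunks mark counters that are allowed to wrap. Under PAX_REFCOUNT, ordinary atomic_t arithmetic traps on overflow to catch reference-count bugs; generation numbers, fragment ids and drop counters are therefore switched to the unchecked type so a legitimate wrap is not mistaken for an exploit. A simplified, non-atomic sketch of the split (the kernel types are, of course, genuinely atomic):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int v; } atomic_t;                    /* checked  */
typedef struct { unsigned int v; } atomic_unchecked_t; /* may wrap */

static int atomic_inc_return(atomic_t *a)
{
    int n;

    if (__builtin_add_overflow(a->v, 1, &n)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();            /* PAX_REFCOUNT would kill the task here */
    }
    return a->v = n;
}

static int atomic_inc_return_unchecked(atomic_unchecked_t *a)
{
    return (int)++a->v;     /* unsigned arithmetic: wrapping is fine */
}

int main(void)
{
    atomic_t ref = { 1 };
    atomic_unchecked_t genid = { UINT_MAX };

    printf("ref -> %d\n", atomic_inc_return(&ref));
    printf("genid wraps to %d\n", atomic_inc_return_unchecked(&genid));
    return 0;
}
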
102487diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102488index 9111a4e..3576905 100644
102489--- a/net/ipv4/inet_hashtables.c
102490+++ b/net/ipv4/inet_hashtables.c
102491@@ -18,6 +18,7 @@
102492 #include <linux/sched.h>
102493 #include <linux/slab.h>
102494 #include <linux/wait.h>
102495+#include <linux/security.h>
102496
102497 #include <net/inet_connection_sock.h>
102498 #include <net/inet_hashtables.h>
102499@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102500 return inet_ehashfn(net, laddr, lport, faddr, fport);
102501 }
102502
102503+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102504+
102505 /*
102506 * Allocate and initialize a new local port bind bucket.
102507 * The bindhash mutex for snum's hash chain must be held here.
102508@@ -554,6 +557,8 @@ ok:
102509 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102510 spin_unlock(&head->lock);
102511
102512+ gr_update_task_in_ip_table(inet_sk(sk));
102513+
102514 if (tw) {
102515 inet_twsk_deschedule(tw, death_row);
102516 while (twrefcnt) {
102517diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102518index 241afd7..31b95d5 100644
102519--- a/net/ipv4/inetpeer.c
102520+++ b/net/ipv4/inetpeer.c
102521@@ -461,7 +461,7 @@ relookup:
102522 if (p) {
102523 p->daddr = *daddr;
102524 atomic_set(&p->refcnt, 1);
102525- atomic_set(&p->rid, 0);
102526+ atomic_set_unchecked(&p->rid, 0);
102527 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102528 p->rate_tokens = 0;
102529 /* 60*HZ is arbitrary, but chosen enough high so that the first
102530diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102531index 145a50c..5dd8cc5 100644
102532--- a/net/ipv4/ip_fragment.c
102533+++ b/net/ipv4/ip_fragment.c
102534@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102535 return 0;
102536
102537 start = qp->rid;
102538- end = atomic_inc_return(&peer->rid);
102539+ end = atomic_inc_return_unchecked(&peer->rid);
102540 qp->rid = end;
102541
102542 rc = qp->q.fragments && (end - start) > max;
102543@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102544
102545 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102546 {
102547- struct ctl_table *table;
102548+ ctl_table_no_const *table = NULL;
102549 struct ctl_table_header *hdr;
102550
102551- table = ip4_frags_ns_ctl_table;
102552 if (!net_eq(net, &init_net)) {
102553- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102554+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102555 if (table == NULL)
102556 goto err_alloc;
102557
102558@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102559 /* Don't export sysctls to unprivileged users */
102560 if (net->user_ns != &init_user_ns)
102561 table[0].procname = NULL;
102562- }
102563+ hdr = register_net_sysctl(net, "net/ipv4", table);
102564+ } else
102565+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102566
102567- hdr = register_net_sysctl(net, "net/ipv4", table);
102568 if (hdr == NULL)
102569 goto err_reg;
102570
102571@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102572 return 0;
102573
102574 err_reg:
102575- if (!net_eq(net, &init_net))
102576- kfree(table);
102577+ kfree(table);
102578 err_alloc:
102579 return -ENOMEM;
102580 }
102581diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102582index 4f4bf5b..2c936fe 100644
102583--- a/net/ipv4/ip_gre.c
102584+++ b/net/ipv4/ip_gre.c
102585@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102586 module_param(log_ecn_error, bool, 0644);
102587 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102588
102589-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102590+static struct rtnl_link_ops ipgre_link_ops;
102591 static int ipgre_tunnel_init(struct net_device *dev);
102592
102593 static int ipgre_net_id __read_mostly;
102594@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102595 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102596 };
102597
102598-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102599+static struct rtnl_link_ops ipgre_link_ops = {
102600 .kind = "gre",
102601 .maxtype = IFLA_GRE_MAX,
102602 .policy = ipgre_policy,
102603@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102604 .fill_info = ipgre_fill_info,
102605 };
102606
102607-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102608+static struct rtnl_link_ops ipgre_tap_ops = {
102609 .kind = "gretap",
102610 .maxtype = IFLA_GRE_MAX,
102611 .policy = ipgre_policy,
102612diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102613index 3d4da2c..40f9c29 100644
102614--- a/net/ipv4/ip_input.c
102615+++ b/net/ipv4/ip_input.c
102616@@ -147,6 +147,10 @@
102617 #include <linux/mroute.h>
102618 #include <linux/netlink.h>
102619
102620+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102621+extern int grsec_enable_blackhole;
102622+#endif
102623+
102624 /*
102625 * Process Router Attention IP option (RFC 2113)
102626 */
102627@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102628 if (!raw) {
102629 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102630 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102631+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102632+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102633+#endif
102634 icmp_send(skb, ICMP_DEST_UNREACH,
102635 ICMP_PROT_UNREACH, 0);
102636 }
102637diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102638index 6b85adb..cd7e5d3 100644
102639--- a/net/ipv4/ip_sockglue.c
102640+++ b/net/ipv4/ip_sockglue.c
102641@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102642 len = min_t(unsigned int, len, opt->optlen);
102643 if (put_user(len, optlen))
102644 return -EFAULT;
102645- if (copy_to_user(optval, opt->__data, len))
102646+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102647+ copy_to_user(optval, opt->__data, len))
102648 return -EFAULT;
102649 return 0;
102650 }
102651@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102652 if (sk->sk_type != SOCK_STREAM)
102653 return -ENOPROTOOPT;
102654
102655- msg.msg_control = (__force void *) optval;
102656+ msg.msg_control = (__force_kernel void *) optval;
102657 msg.msg_controllen = len;
102658 msg.msg_flags = flags;
102659
102660diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102661index 1a7e979..fd05aa4 100644
102662--- a/net/ipv4/ip_vti.c
102663+++ b/net/ipv4/ip_vti.c
102664@@ -45,7 +45,7 @@
102665 #include <net/net_namespace.h>
102666 #include <net/netns/generic.h>
102667
102668-static struct rtnl_link_ops vti_link_ops __read_mostly;
102669+static struct rtnl_link_ops vti_link_ops;
102670
102671 static int vti_net_id __read_mostly;
102672 static int vti_tunnel_init(struct net_device *dev);
102673@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102674 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102675 };
102676
102677-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102678+static struct rtnl_link_ops vti_link_ops = {
102679 .kind = "vti",
102680 .maxtype = IFLA_VTI_MAX,
102681 .policy = vti_policy,
102682diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102683index 7fa18bc..bea16af 100644
102684--- a/net/ipv4/ipconfig.c
102685+++ b/net/ipv4/ipconfig.c
102686@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102687
102688 mm_segment_t oldfs = get_fs();
102689 set_fs(get_ds());
102690- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102691+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102692 set_fs(oldfs);
102693 return res;
102694 }
102695@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102696
102697 mm_segment_t oldfs = get_fs();
102698 set_fs(get_ds());
102699- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102700+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102701 set_fs(oldfs);
102702 return res;
102703 }
102704@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102705
102706 mm_segment_t oldfs = get_fs();
102707 set_fs(get_ds());
102708- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102709+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102710 set_fs(oldfs);
102711 return res;
102712 }
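
The __force_user casts in the ipconfig hunks (and the __force_kernel cast in ip_sockglue.c above) belong to grsecurity's stricter user/kernel pointer separation: these call sites run under set_fs(get_ds()), deliberately feeding a kernel pointer to a __user-typed parameter, and the annotation lets the sparse checker accept exactly that crossing and nothing else. A sketch of the annotations involved, assuming the usual sparse macro definitions:

/* Plain compilers see empty macros; sparse sees address spaces and
 * rejects unannotated crossings between them. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

static int take_user_ptr(void __user *p)
{
    (void)p;
    return 0;
}

int main(void)
{
    char kbuf[16];

    /* return take_user_ptr(kbuf);       <- sparse: wrong address space */
    return take_user_ptr((void __force_user *)kbuf);     /* accepted */
}
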
102713diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102714index 40403114..c35c647 100644
102715--- a/net/ipv4/ipip.c
102716+++ b/net/ipv4/ipip.c
102717@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102718 static int ipip_net_id __read_mostly;
102719
102720 static int ipip_tunnel_init(struct net_device *dev);
102721-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102722+static struct rtnl_link_ops ipip_link_ops;
102723
102724 static int ipip_err(struct sk_buff *skb, u32 info)
102725 {
102726@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102727 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102728 };
102729
102730-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102731+static struct rtnl_link_ops ipip_link_ops = {
102732 .kind = "ipip",
102733 .maxtype = IFLA_IPTUN_MAX,
102734 .policy = ipip_policy,
102735diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102736index f95b6f9..2ee2097 100644
102737--- a/net/ipv4/netfilter/arp_tables.c
102738+++ b/net/ipv4/netfilter/arp_tables.c
102739@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102740 #endif
102741
102742 static int get_info(struct net *net, void __user *user,
102743- const int *len, int compat)
102744+ int len, int compat)
102745 {
102746 char name[XT_TABLE_MAXNAMELEN];
102747 struct xt_table *t;
102748 int ret;
102749
102750- if (*len != sizeof(struct arpt_getinfo)) {
102751- duprintf("length %u != %Zu\n", *len,
102752+ if (len != sizeof(struct arpt_getinfo)) {
102753+ duprintf("length %u != %Zu\n", len,
102754 sizeof(struct arpt_getinfo));
102755 return -EINVAL;
102756 }
102757@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102758 info.size = private->size;
102759 strcpy(info.name, name);
102760
102761- if (copy_to_user(user, &info, *len) != 0)
102762+ if (copy_to_user(user, &info, len) != 0)
102763 ret = -EFAULT;
102764 else
102765 ret = 0;
102766@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102767
102768 switch (cmd) {
102769 case ARPT_SO_GET_INFO:
102770- ret = get_info(sock_net(sk), user, len, 1);
102771+ ret = get_info(sock_net(sk), user, *len, 1);
102772 break;
102773 case ARPT_SO_GET_ENTRIES:
102774 ret = compat_get_entries(sock_net(sk), user, len);
102775@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102776
102777 switch (cmd) {
102778 case ARPT_SO_GET_INFO:
102779- ret = get_info(sock_net(sk), user, len, 0);
102780+ ret = get_info(sock_net(sk), user, *len, 0);
102781 break;
102782
102783 case ARPT_SO_GET_ENTRIES:
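
Passing len by value instead of const int *len (repeated below for ip_tables and ip6_tables) removes a second read of the length: the callers now dereference *len exactly once, so the size that get_info() validates against sizeof(struct arpt_getinfo) is by construction the size later handed to copy_to_user(), whatever might rewrite *len in between. That reading of the intent is an inference, not something the patch states. A userspace sketch of the fixed shape:

#include <stdio.h>
#include <string.h>

struct info { char name[32]; unsigned int size; };

/* before: static int get_info(..., const int *len)  -- *len read twice */
static int get_info(char *user, int len)
{
    struct info info = { "filter", 1234 };

    if (len != (int)sizeof(info))      /* validated ... */
        return -1;
    memcpy(user, &info, len);          /* ... and used: the same value */
    return 0;
}

int main(void)
{
    char buf[sizeof(struct info)];
    int len = sizeof(buf);             /* fetched once by the caller */

    printf("get_info -> %d\n", get_info(buf, len));
    return 0;
}
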
102784diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102785index 99e810f..3711b81 100644
102786--- a/net/ipv4/netfilter/ip_tables.c
102787+++ b/net/ipv4/netfilter/ip_tables.c
102788@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102789 #endif
102790
102791 static int get_info(struct net *net, void __user *user,
102792- const int *len, int compat)
102793+ int len, int compat)
102794 {
102795 char name[XT_TABLE_MAXNAMELEN];
102796 struct xt_table *t;
102797 int ret;
102798
102799- if (*len != sizeof(struct ipt_getinfo)) {
102800- duprintf("length %u != %zu\n", *len,
102801+ if (len != sizeof(struct ipt_getinfo)) {
102802+ duprintf("length %u != %zu\n", len,
102803 sizeof(struct ipt_getinfo));
102804 return -EINVAL;
102805 }
102806@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102807 info.size = private->size;
102808 strcpy(info.name, name);
102809
102810- if (copy_to_user(user, &info, *len) != 0)
102811+ if (copy_to_user(user, &info, len) != 0)
102812 ret = -EFAULT;
102813 else
102814 ret = 0;
102815@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102816
102817 switch (cmd) {
102818 case IPT_SO_GET_INFO:
102819- ret = get_info(sock_net(sk), user, len, 1);
102820+ ret = get_info(sock_net(sk), user, *len, 1);
102821 break;
102822 case IPT_SO_GET_ENTRIES:
102823 ret = compat_get_entries(sock_net(sk), user, len);
102824@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102825
102826 switch (cmd) {
102827 case IPT_SO_GET_INFO:
102828- ret = get_info(sock_net(sk), user, len, 0);
102829+ ret = get_info(sock_net(sk), user, *len, 0);
102830 break;
102831
102832 case IPT_SO_GET_ENTRIES:
102833diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102834index e90f83a..3e6acca 100644
102835--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102836+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102837@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102838 spin_lock_init(&cn->lock);
102839
102840 #ifdef CONFIG_PROC_FS
102841- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102842+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102843 if (!cn->procdir) {
102844 pr_err("Unable to proc dir entry\n");
102845 return -ENOMEM;
102846diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102847index 0ae28f5..d32b565 100644
102848--- a/net/ipv4/ping.c
102849+++ b/net/ipv4/ping.c
102850@@ -59,7 +59,7 @@ struct ping_table {
102851 };
102852
102853 static struct ping_table ping_table;
102854-struct pingv6_ops pingv6_ops;
102855+struct pingv6_ops *pingv6_ops;
102856 EXPORT_SYMBOL_GPL(pingv6_ops);
102857
102858 static u16 ping_port_rover;
102859@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102860 return -ENODEV;
102861 }
102862 }
102863- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102864+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102865 scoped);
102866 rcu_read_unlock();
102867
102868@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102869 }
102870 #if IS_ENABLED(CONFIG_IPV6)
102871 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102872- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102873+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102874 #endif
102875 }
102876
102877@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102878 info, (u8 *)icmph);
102879 #if IS_ENABLED(CONFIG_IPV6)
102880 } else if (family == AF_INET6) {
102881- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102882+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102883 info, (u8 *)icmph);
102884 #endif
102885 }
102886@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102887 }
102888
102889 if (inet6_sk(sk)->rxopt.all)
102890- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102891+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102892 if (skb->protocol == htons(ETH_P_IPV6) &&
102893 inet6_sk(sk)->rxopt.all)
102894- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102895+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102896 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102897 ip_cmsg_recv(msg, skb);
102898 #endif
102899@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102900 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102901 0, sock_i_ino(sp),
102902 atomic_read(&sp->sk_refcnt), sp,
102903- atomic_read(&sp->sk_drops));
102904+ atomic_read_unchecked(&sp->sk_drops));
102905 }
102906
102907 static int ping_v4_seq_show(struct seq_file *seq, void *v)
102908diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102909index 0bb68df..59405fc 100644
102910--- a/net/ipv4/raw.c
102911+++ b/net/ipv4/raw.c
102912@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102913 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102914 {
102915 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102916- atomic_inc(&sk->sk_drops);
102917+ atomic_inc_unchecked(&sk->sk_drops);
102918 kfree_skb(skb);
102919 return NET_RX_DROP;
102920 }
102921@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
102922
102923 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102924 {
102925+ struct icmp_filter filter;
102926+
102927 if (optlen > sizeof(struct icmp_filter))
102928 optlen = sizeof(struct icmp_filter);
102929- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102930+ if (copy_from_user(&filter, optval, optlen))
102931 return -EFAULT;
102932+ raw_sk(sk)->filter = filter;
102933 return 0;
102934 }
102935
102936 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102937 {
102938 int len, ret = -EFAULT;
102939+ struct icmp_filter filter;
102940
102941 if (get_user(len, optlen))
102942 goto out;
102943@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102944 if (len > sizeof(struct icmp_filter))
102945 len = sizeof(struct icmp_filter);
102946 ret = -EFAULT;
102947- if (put_user(len, optlen) ||
102948- copy_to_user(optval, &raw_sk(sk)->filter, len))
102949+ filter = raw_sk(sk)->filter;
102950+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102951 goto out;
102952 ret = 0;
102953 out: return ret;
102954@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102955 0, 0L, 0,
102956 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102957 0, sock_i_ino(sp),
102958- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102959+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102960 }
102961
102962 static int raw_seq_show(struct seq_file *seq, void *v)
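
raw_seticmpfilter()/raw_geticmpfilter() are rewritten to bounce through a complete local struct icmp_filter rather than letting copy_from_user()/copy_to_user() touch the socket object directly: the filter is published (or read out) in one whole-struct assignment, so a faulting copy can never leave it torn, and the extra len > sizeof filter test mirrors the bounds guard seen in the decnet hunks. A sketch of the set side, with copy_in() standing in for copy_from_user():

#include <stdio.h>
#include <string.h>

struct icmp_filter { unsigned int data; };
struct raw_sock { struct icmp_filter filter; };

static int copy_in(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);      /* stands in for copy_from_user() */
    return 0;
}

static int set_icmp_filter(struct raw_sock *rs, const void *optval,
                           size_t optlen)
{
    struct icmp_filter filter = { 0 };

    if (optlen > sizeof(filter))
        optlen = sizeof(filter);
    if (copy_in(&filter, optval, optlen))
        return -1;              /* -EFAULT */
    rs->filter = filter;        /* publish the whole struct at once */
    return 0;
}

int main(void)
{
    struct raw_sock rs = { { 0 } };
    struct icmp_filter f = { 0xff };

    set_icmp_filter(&rs, &f, sizeof(f));
    printf("filter: %#x\n", rs.filter.data);
    return 0;
}
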
102963diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102964index 52e1f2b..e736cb4 100644
102965--- a/net/ipv4/route.c
102966+++ b/net/ipv4/route.c
102967@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102968
102969 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102970 {
102971- return seq_open(file, &rt_cache_seq_ops);
102972+ return seq_open_restrict(file, &rt_cache_seq_ops);
102973 }
102974
102975 static const struct file_operations rt_cache_seq_fops = {
102976@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102977
102978 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102979 {
102980- return seq_open(file, &rt_cpu_seq_ops);
102981+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102982 }
102983
102984 static const struct file_operations rt_cpu_seq_fops = {
102985@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102986
102987 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102988 {
102989- return single_open(file, rt_acct_proc_show, NULL);
102990+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102991 }
102992
102993 static const struct file_operations rt_acct_proc_fops = {
102994@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102995
102996 #define IP_IDENTS_SZ 2048u
102997 struct ip_ident_bucket {
102998- atomic_t id;
102999+ atomic_unchecked_t id;
103000 u32 stamp32;
103001 };
103002
103003-static struct ip_ident_bucket *ip_idents __read_mostly;
103004+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103005
103006 /* In order to protect privacy, we add a perturbation to identifiers
103007 * if one generator is seldom used. This makes hard for an attacker
103008@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103009 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103010 delta = prandom_u32_max(now - old);
103011
103012- return atomic_add_return(segs + delta, &bucket->id) - segs;
103013+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103014 }
103015 EXPORT_SYMBOL(ip_idents_reserve);
103016
103017@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103018 .maxlen = sizeof(int),
103019 .mode = 0200,
103020 .proc_handler = ipv4_sysctl_rtcache_flush,
103021+ .extra1 = &init_net,
103022 },
103023 { },
103024 };
103025
103026 static __net_init int sysctl_route_net_init(struct net *net)
103027 {
103028- struct ctl_table *tbl;
103029+ ctl_table_no_const *tbl = NULL;
103030
103031- tbl = ipv4_route_flush_table;
103032 if (!net_eq(net, &init_net)) {
103033- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103034+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103035 if (tbl == NULL)
103036 goto err_dup;
103037
103038 /* Don't export sysctls to unprivileged users */
103039 if (net->user_ns != &init_user_ns)
103040 tbl[0].procname = NULL;
103041- }
103042- tbl[0].extra1 = net;
103043+ tbl[0].extra1 = net;
103044+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103045+ } else
103046+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103047
103048- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103049 if (net->ipv4.route_hdr == NULL)
103050 goto err_reg;
103051 return 0;
103052
103053 err_reg:
103054- if (tbl != ipv4_route_flush_table)
103055- kfree(tbl);
103056+ kfree(tbl);
103057 err_dup:
103058 return -ENOMEM;
103059 }
103060@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103061
103062 static __net_init int rt_genid_init(struct net *net)
103063 {
103064- atomic_set(&net->ipv4.rt_genid, 0);
103065- atomic_set(&net->fnhe_genid, 0);
103066+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103067+ atomic_set_unchecked(&net->fnhe_genid, 0);
103068 get_random_bytes(&net->ipv4.dev_addr_genid,
103069 sizeof(net->ipv4.dev_addr_genid));
103070 return 0;
103071@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
103072 {
103073 int rc = 0;
103074
103075- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103076- if (!ip_idents)
103077- panic("IP: failed to allocate ip_idents\n");
103078-
103079- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103080+ prandom_bytes(ip_idents, sizeof(ip_idents));
103081
103082 #ifdef CONFIG_IP_ROUTE_CLASSID
103083 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
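
net/ipv4/route.c drops the boot-time kmalloc() of ip_idents in favour of a static array: the allocation-failure path and its panic() disappear, and sizeof(ip_idents) now denotes the whole table rather than a pointer, which is exactly what the reshaped prandom_bytes() call relies on. A reduced sketch of the storage change:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IP_IDENTS_SZ 2048u

struct ip_ident_bucket { uint32_t id; uint32_t stamp32; };

/* before: struct ip_ident_bucket *ip_idents;   kmalloc'd at boot,
 *         with panic("IP: failed to allocate ip_idents\n") on failure */
static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ];

int main(void)
{
    /* sizeof now covers the whole table, not a pointer width */
    memset(ip_idents, 0, sizeof(ip_idents));  /* prandom_bytes() there */
    printf("table is %zu bytes\n", sizeof(ip_idents));
    return 0;
}
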
103084diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103085index e0ee384..e2688d9 100644
103086--- a/net/ipv4/sysctl_net_ipv4.c
103087+++ b/net/ipv4/sysctl_net_ipv4.c
103088@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103089 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103090 int ret;
103091 int range[2];
103092- struct ctl_table tmp = {
103093+ ctl_table_no_const tmp = {
103094 .data = &range,
103095 .maxlen = sizeof(range),
103096 .mode = table->mode,
103097@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103098 int ret;
103099 gid_t urange[2];
103100 kgid_t low, high;
103101- struct ctl_table tmp = {
103102+ ctl_table_no_const tmp = {
103103 .data = &urange,
103104 .maxlen = sizeof(urange),
103105 .mode = table->mode,
103106@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103107 void __user *buffer, size_t *lenp, loff_t *ppos)
103108 {
103109 char val[TCP_CA_NAME_MAX];
103110- struct ctl_table tbl = {
103111+ ctl_table_no_const tbl = {
103112 .data = val,
103113 .maxlen = TCP_CA_NAME_MAX,
103114 };
103115@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103116 void __user *buffer, size_t *lenp,
103117 loff_t *ppos)
103118 {
103119- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103120+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103121 int ret;
103122
103123 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103124@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103125 void __user *buffer, size_t *lenp,
103126 loff_t *ppos)
103127 {
103128- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103129+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103130 int ret;
103131
103132 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103133@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103134 void __user *buffer, size_t *lenp,
103135 loff_t *ppos)
103136 {
103137- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103138+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103139 struct tcp_fastopen_context *ctxt;
103140 int ret;
103141 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103142@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
103143
103144 static __net_init int ipv4_sysctl_init_net(struct net *net)
103145 {
103146- struct ctl_table *table;
103147+ ctl_table_no_const *table = NULL;
103148
103149- table = ipv4_net_table;
103150 if (!net_eq(net, &init_net)) {
103151 int i;
103152
103153- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103154+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103155 if (table == NULL)
103156 goto err_alloc;
103157
103158@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103159 table[i].data += (void *)net - (void *)&init_net;
103160 }
103161
103162- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103163+ if (!net_eq(net, &init_net))
103164+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103165+ else
103166+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103167 if (net->ipv4.ipv4_hdr == NULL)
103168 goto err_reg;
103169
103170diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103171index 075ab4d..623bb9d 100644
103172--- a/net/ipv4/tcp_input.c
103173+++ b/net/ipv4/tcp_input.c
103174@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103175 * without any lock. We want to make sure compiler wont store
103176 * intermediate values in this location.
103177 */
103178- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103179+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103180 sk->sk_max_pacing_rate);
103181 }
103182
103183@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103184 * simplifies code)
103185 */
103186 static void
103187-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103188+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103189 struct sk_buff *head, struct sk_buff *tail,
103190 u32 start, u32 end)
103191 {
103192@@ -5506,6 +5506,7 @@ discard:
103193 tcp_paws_reject(&tp->rx_opt, 0))
103194 goto discard_and_undo;
103195
103196+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103197 if (th->syn) {
103198 /* We see SYN without ACK. It is attempt of
103199 * simultaneous connect with crossed SYNs.
103200@@ -5556,6 +5557,7 @@ discard:
103201 goto discard;
103202 #endif
103203 }
103204+#endif
103205 /* "fifth, if neither of the SYN or RST bits is set then
103206 * drop the segment and return."
103207 */
103208@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103209 goto discard;
103210
103211 if (th->syn) {
103212- if (th->fin)
103213+ if (th->fin || th->urg || th->psh)
103214 goto discard;
103215 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103216 return 1;
103217diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103218index d22f544..62f6787 100644
103219--- a/net/ipv4/tcp_ipv4.c
103220+++ b/net/ipv4/tcp_ipv4.c
103221@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103222 int sysctl_tcp_low_latency __read_mostly;
103223 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103224
103225+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103226+extern int grsec_enable_blackhole;
103227+#endif
103228+
103229 #ifdef CONFIG_TCP_MD5SIG
103230 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103231 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103232@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103233 return 0;
103234
103235 reset:
103236+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103237+ if (!grsec_enable_blackhole)
103238+#endif
103239 tcp_v4_send_reset(rsk, skb);
103240 discard:
103241 kfree_skb(skb);
103242@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103243 TCP_SKB_CB(skb)->sacked = 0;
103244
103245 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103246- if (!sk)
103247+ if (!sk) {
103248+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103249+ ret = 1;
103250+#endif
103251 goto no_tcp_socket;
103252-
103253+ }
103254 process:
103255- if (sk->sk_state == TCP_TIME_WAIT)
103256+ if (sk->sk_state == TCP_TIME_WAIT) {
103257+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103258+ ret = 2;
103259+#endif
103260 goto do_time_wait;
103261+ }
103262
103263 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103264 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103265@@ -1698,6 +1712,10 @@ csum_error:
103266 bad_packet:
103267 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103268 } else {
103269+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103270+ if (!grsec_enable_blackhole || (ret == 1 &&
103271+ (skb->dev->flags & IFF_LOOPBACK)))
103272+#endif
103273 tcp_v4_send_reset(NULL, skb);
103274 }
103275
103276diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103277index 63d2680..2db9d6b 100644
103278--- a/net/ipv4/tcp_minisocks.c
103279+++ b/net/ipv4/tcp_minisocks.c
103280@@ -27,6 +27,10 @@
103281 #include <net/inet_common.h>
103282 #include <net/xfrm.h>
103283
103284+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103285+extern int grsec_enable_blackhole;
103286+#endif
103287+
103288 int sysctl_tcp_syncookies __read_mostly = 1;
103289 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103290
103291@@ -739,7 +743,10 @@ embryonic_reset:
103292 * avoid becoming vulnerable to outside attack aiming at
103293 * resetting legit local connections.
103294 */
103295- req->rsk_ops->send_reset(sk, skb);
103296+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103297+ if (!grsec_enable_blackhole)
103298+#endif
103299+ req->rsk_ops->send_reset(sk, skb);
103300 } else if (fastopen) { /* received a valid RST pkt */
103301 reqsk_fastopen_remove(sk, req, true);
103302 tcp_reset(sk);
103303diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103304index ebf5ff5..4d1ff32 100644
103305--- a/net/ipv4/tcp_probe.c
103306+++ b/net/ipv4/tcp_probe.c
103307@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103308 if (cnt + width >= len)
103309 break;
103310
103311- if (copy_to_user(buf + cnt, tbuf, width))
103312+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103313 return -EFAULT;
103314 cnt += width;
103315 }
103316diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103317index 1829c7f..c0b3d52 100644
103318--- a/net/ipv4/tcp_timer.c
103319+++ b/net/ipv4/tcp_timer.c
103320@@ -22,6 +22,10 @@
103321 #include <linux/gfp.h>
103322 #include <net/tcp.h>
103323
103324+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103325+extern int grsec_lastack_retries;
103326+#endif
103327+
103328 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103329 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103330 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103331@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103332 }
103333 }
103334
103335+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103336+ if ((sk->sk_state == TCP_LAST_ACK) &&
103337+ (grsec_lastack_retries > 0) &&
103338+ (grsec_lastack_retries < retry_until))
103339+ retry_until = grsec_lastack_retries;
103340+#endif
103341+
103342 if (retransmits_timed_out(sk, retry_until,
103343 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103344 /* Has it gone just too far? */
103345diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103346index 13b4dcf..b866a2a 100644
103347--- a/net/ipv4/udp.c
103348+++ b/net/ipv4/udp.c
103349@@ -87,6 +87,7 @@
103350 #include <linux/types.h>
103351 #include <linux/fcntl.h>
103352 #include <linux/module.h>
103353+#include <linux/security.h>
103354 #include <linux/socket.h>
103355 #include <linux/sockios.h>
103356 #include <linux/igmp.h>
103357@@ -114,6 +115,10 @@
103358 #include <net/busy_poll.h>
103359 #include "udp_impl.h"
103360
103361+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103362+extern int grsec_enable_blackhole;
103363+#endif
103364+
103365 struct udp_table udp_table __read_mostly;
103366 EXPORT_SYMBOL(udp_table);
103367
103368@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103369 return true;
103370 }
103371
103372+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103373+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103374+
103375 /*
103376 * This routine is called by the ICMP module when it gets some
103377 * sort of error condition. If err < 0 then the socket should
103378@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103379 dport = usin->sin_port;
103380 if (dport == 0)
103381 return -EINVAL;
103382+
103383+ err = gr_search_udp_sendmsg(sk, usin);
103384+ if (err)
103385+ return err;
103386 } else {
103387 if (sk->sk_state != TCP_ESTABLISHED)
103388 return -EDESTADDRREQ;
103389+
103390+ err = gr_search_udp_sendmsg(sk, NULL);
103391+ if (err)
103392+ return err;
103393+
103394 daddr = inet->inet_daddr;
103395 dport = inet->inet_dport;
103396 /* Open fast path for connected socket.
103397@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103398 IS_UDPLITE(sk));
103399 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103400 IS_UDPLITE(sk));
103401- atomic_inc(&sk->sk_drops);
103402+ atomic_inc_unchecked(&sk->sk_drops);
103403 __skb_unlink(skb, rcvq);
103404 __skb_queue_tail(&list_kill, skb);
103405 }
103406@@ -1275,6 +1292,10 @@ try_again:
103407 if (!skb)
103408 goto out;
103409
103410+ err = gr_search_udp_recvmsg(sk, skb);
103411+ if (err)
103412+ goto out_free;
103413+
103414 ulen = skb->len - sizeof(struct udphdr);
103415 copied = len;
103416 if (copied > ulen)
103417@@ -1307,7 +1328,7 @@ try_again:
103418 if (unlikely(err)) {
103419 trace_kfree_skb(skb, udp_recvmsg);
103420 if (!peeked) {
103421- atomic_inc(&sk->sk_drops);
103422+ atomic_inc_unchecked(&sk->sk_drops);
103423 UDP_INC_STATS_USER(sock_net(sk),
103424 UDP_MIB_INERRORS, is_udplite);
103425 }
103426@@ -1605,7 +1626,7 @@ csum_error:
103427 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103428 drop:
103429 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103430- atomic_inc(&sk->sk_drops);
103431+ atomic_inc_unchecked(&sk->sk_drops);
103432 kfree_skb(skb);
103433 return -1;
103434 }
103435@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103436 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103437
103438 if (!skb1) {
103439- atomic_inc(&sk->sk_drops);
103440+ atomic_inc_unchecked(&sk->sk_drops);
103441 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103442 IS_UDPLITE(sk));
103443 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103444@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103445 goto csum_error;
103446
103447 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103448+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103449+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103450+#endif
103451 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103452
103453 /*
103454@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103455 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103456 0, sock_i_ino(sp),
103457 atomic_read(&sp->sk_refcnt), sp,
103458- atomic_read(&sp->sk_drops));
103459+ atomic_read_unchecked(&sp->sk_drops));
103460 }
103461
103462 int udp4_seq_show(struct seq_file *seq, void *v)
103463diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103464index 6156f68..d6ab46d 100644
103465--- a/net/ipv4/xfrm4_policy.c
103466+++ b/net/ipv4/xfrm4_policy.c
103467@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103468 fl4->flowi4_tos = iph->tos;
103469 }
103470
103471-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103472+static int xfrm4_garbage_collect(struct dst_ops *ops)
103473 {
103474 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103475
103476- xfrm4_policy_afinfo.garbage_collect(net);
103477+ xfrm_garbage_collect_deferred(net);
103478 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103479 }
103480
103481@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103482
103483 static int __net_init xfrm4_net_init(struct net *net)
103484 {
103485- struct ctl_table *table;
103486+ ctl_table_no_const *table = NULL;
103487 struct ctl_table_header *hdr;
103488
103489- table = xfrm4_policy_table;
103490 if (!net_eq(net, &init_net)) {
103491- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103492+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103493 if (!table)
103494 goto err_alloc;
103495
103496 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103497- }
103498-
103499- hdr = register_net_sysctl(net, "net/ipv4", table);
103500+ hdr = register_net_sysctl(net, "net/ipv4", table);
103501+ } else
103502+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103503 if (!hdr)
103504 goto err_reg;
103505
103506@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103507 return 0;
103508
103509 err_reg:
103510- if (!net_eq(net, &init_net))
103511- kfree(table);
103512+ kfree(table);
103513 err_alloc:
103514 return -ENOMEM;
103515 }
103516diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103517index dac9419..534fa31 100644
103518--- a/net/ipv6/addrconf.c
103519+++ b/net/ipv6/addrconf.c
103520@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103521 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103522 .mtu6 = IPV6_MIN_MTU,
103523 .accept_ra = 1,
103524- .accept_redirects = 1,
103525+ .accept_redirects = 0,
103526 .autoconf = 1,
103527 .force_mld_version = 0,
103528 .mldv1_unsolicited_report_interval = 10 * HZ,
103529@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103530 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103531 .mtu6 = IPV6_MIN_MTU,
103532 .accept_ra = 1,
103533- .accept_redirects = 1,
103534+ .accept_redirects = 0,
103535 .autoconf = 1,
103536 .force_mld_version = 0,
103537 .mldv1_unsolicited_report_interval = 10 * HZ,
103538@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103539 idx = 0;
103540 head = &net->dev_index_head[h];
103541 rcu_read_lock();
103542- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103543+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103544 net->dev_base_seq;
103545 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103546 if (idx < s_idx)
103547@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103548 p.iph.ihl = 5;
103549 p.iph.protocol = IPPROTO_IPV6;
103550 p.iph.ttl = 64;
103551- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103552+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103553
103554 if (ops->ndo_do_ioctl) {
103555 mm_segment_t oldfs = get_fs();
103556@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103557 .release = seq_release_net,
103558 };
103559
103560+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103561+extern void unregister_ipv6_seq_ops_addr(void);
103562+
103563 static int __net_init if6_proc_net_init(struct net *net)
103564 {
103565- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103566+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103567+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103568+ unregister_ipv6_seq_ops_addr();
103569 return -ENOMEM;
103570+ }
103571 return 0;
103572 }
103573
103574 static void __net_exit if6_proc_net_exit(struct net *net)
103575 {
103576 remove_proc_entry("if_inet6", net->proc_net);
103577+ unregister_ipv6_seq_ops_addr();
103578 }
103579
103580 static struct pernet_operations if6_proc_net_ops = {
103581@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103582 s_ip_idx = ip_idx = cb->args[2];
103583
103584 rcu_read_lock();
103585- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103586+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103587 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103588 idx = 0;
103589 head = &net->dev_index_head[h];
103590@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103591 rt_genid_bump_ipv6(net);
103592 break;
103593 }
103594- atomic_inc(&net->ipv6.dev_addr_genid);
103595+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103596 }
103597
103598 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103599@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103600 int *valp = ctl->data;
103601 int val = *valp;
103602 loff_t pos = *ppos;
103603- struct ctl_table lctl;
103604+ ctl_table_no_const lctl;
103605 int ret;
103606
103607 /*
103608@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103609 int *valp = ctl->data;
103610 int val = *valp;
103611 loff_t pos = *ppos;
103612- struct ctl_table lctl;
103613+ ctl_table_no_const lctl;
103614 int ret;
103615
103616 /*
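
Together with the net/ipv4/devinet.c hunk earlier, these addrconf changes flip the shipped defaults to a hardened posture: ICMP redirects are refused on both address families, and IPv4 gains strict reverse-path filtering. A small checker for a running system, assuming the standard proc paths are present:

#include <stdio.h>

static void show(const char *path)
{
    char buf[16];
    FILE *f = fopen(path, "r");

    if (f && fgets(buf, sizeof(buf), f))
        printf("%s = %s", path, buf);
    if (f)
        fclose(f);
}

int main(void)
{
    show("/proc/sys/net/ipv4/conf/all/accept_redirects");
    show("/proc/sys/net/ipv4/conf/all/rp_filter");
    show("/proc/sys/net/ipv6/conf/all/accept_redirects");
    return 0;
}
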
103617diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103618index e8c4400..a4cd5da 100644
103619--- a/net/ipv6/af_inet6.c
103620+++ b/net/ipv6/af_inet6.c
103621@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103622 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103623 net->ipv6.sysctl.flowlabel_consistency = 1;
103624 net->ipv6.sysctl.auto_flowlabels = 0;
103625- atomic_set(&net->ipv6.fib6_sernum, 1);
103626+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103627
103628 err = ipv6_init_mibs(net);
103629 if (err)
103630diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103631index 49f5e73..ae02d54 100644
103632--- a/net/ipv6/datagram.c
103633+++ b/net/ipv6/datagram.c
103634@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103635 0,
103636 sock_i_ino(sp),
103637 atomic_read(&sp->sk_refcnt), sp,
103638- atomic_read(&sp->sk_drops));
103639+ atomic_read_unchecked(&sp->sk_drops));
103640 }
103641diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103642index d674152..fb5a01d 100644
103643--- a/net/ipv6/icmp.c
103644+++ b/net/ipv6/icmp.c
103645@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103646
103647 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103648 {
103649- struct ctl_table *table;
103650+ ctl_table_no_const *table;
103651
103652 table = kmemdup(ipv6_icmp_table_template,
103653 sizeof(ipv6_icmp_table_template),
103654diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103655index f1c6d5e..faabef6 100644
103656--- a/net/ipv6/ip6_fib.c
103657+++ b/net/ipv6/ip6_fib.c
103658@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103659 int new, old;
103660
103661 do {
103662- old = atomic_read(&net->ipv6.fib6_sernum);
103663+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103664 new = old < INT_MAX ? old + 1 : 1;
103665- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103666+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103667 old, new) != old);
103668 return new;
103669 }
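
fib6_new_sernum() keeps its lock-free shape, only retargeted at the unchecked atomic: a compare-and-swap loop advances the serial number and wraps to 1, never to 0 or a negative value. The same loop in portable C11, with the _unchecked calls mapped onto plain atomics for the sketch:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int fib6_sernum = 1;

static int fib6_new_sernum_sketch(void)
{
    int old, next;

    do {
        old  = atomic_load(&fib6_sernum);
        next = old < INT_MAX ? old + 1 : 1;   /* wrap to 1, skip 0 */
    } while (!atomic_compare_exchange_weak(&fib6_sernum, &old, next));
    return next;
}

int main(void)
{
    printf("sernum: %d\n", fib6_new_sernum_sketch());
    return 0;
}
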
103670diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103671index 01ccc28..66861c7 100644
103672--- a/net/ipv6/ip6_gre.c
103673+++ b/net/ipv6/ip6_gre.c
103674@@ -71,8 +71,8 @@ struct ip6gre_net {
103675 struct net_device *fb_tunnel_dev;
103676 };
103677
103678-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103679-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103680+static struct rtnl_link_ops ip6gre_link_ops;
103681+static struct rtnl_link_ops ip6gre_tap_ops;
103682 static int ip6gre_tunnel_init(struct net_device *dev);
103683 static void ip6gre_tunnel_setup(struct net_device *dev);
103684 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103685@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103686 }
103687
103688
103689-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103690+static struct inet6_protocol ip6gre_protocol = {
103691 .handler = ip6gre_rcv,
103692 .err_handler = ip6gre_err,
103693 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103694@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103695 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103696 };
103697
103698-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103699+static struct rtnl_link_ops ip6gre_link_ops = {
103700 .kind = "ip6gre",
103701 .maxtype = IFLA_GRE_MAX,
103702 .policy = ip6gre_policy,
103703@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103704 .fill_info = ip6gre_fill_info,
103705 };
103706
103707-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103708+static struct rtnl_link_ops ip6gre_tap_ops = {
103709 .kind = "ip6gretap",
103710 .maxtype = IFLA_GRE_MAX,
103711 .policy = ip6gre_policy,
103712diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103713index 92b3da5..77837b8 100644
103714--- a/net/ipv6/ip6_tunnel.c
103715+++ b/net/ipv6/ip6_tunnel.c
103716@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103717
103718 static int ip6_tnl_dev_init(struct net_device *dev);
103719 static void ip6_tnl_dev_setup(struct net_device *dev);
103720-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103721+static struct rtnl_link_ops ip6_link_ops;
103722
103723 static int ip6_tnl_net_id __read_mostly;
103724 struct ip6_tnl_net {
103725@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103726 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103727 };
103728
103729-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103730+static struct rtnl_link_ops ip6_link_ops = {
103731 .kind = "ip6tnl",
103732 .maxtype = IFLA_IPTUN_MAX,
103733 .policy = ip6_tnl_policy,
103734diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103735index ace10d0..97a8b49 100644
103736--- a/net/ipv6/ip6_vti.c
103737+++ b/net/ipv6/ip6_vti.c
103738@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103739
103740 static int vti6_dev_init(struct net_device *dev);
103741 static void vti6_dev_setup(struct net_device *dev);
103742-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103743+static struct rtnl_link_ops vti6_link_ops;
103744
103745 static int vti6_net_id __read_mostly;
103746 struct vti6_net {
103747@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103748 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103749 };
103750
103751-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103752+static struct rtnl_link_ops vti6_link_ops = {
103753 .kind = "vti6",
103754 .maxtype = IFLA_VTI_MAX,
103755 .policy = vti6_policy,
103756diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103757index 66980d8d..8aef0d1 100644
103758--- a/net/ipv6/ipv6_sockglue.c
103759+++ b/net/ipv6/ipv6_sockglue.c
103760@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103761 if (sk->sk_type != SOCK_STREAM)
103762 return -ENOPROTOOPT;
103763
103764- msg.msg_control = optval;
103765+ msg.msg_control = (void __force_kernel *)optval;
103766 msg.msg_controllen = len;
103767 msg.msg_flags = flags;
103768
103769diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103770index e080fbb..412b3cf 100644
103771--- a/net/ipv6/netfilter/ip6_tables.c
103772+++ b/net/ipv6/netfilter/ip6_tables.c
103773@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103774 #endif
103775
103776 static int get_info(struct net *net, void __user *user,
103777- const int *len, int compat)
103778+ int len, int compat)
103779 {
103780 char name[XT_TABLE_MAXNAMELEN];
103781 struct xt_table *t;
103782 int ret;
103783
103784- if (*len != sizeof(struct ip6t_getinfo)) {
103785- duprintf("length %u != %zu\n", *len,
103786+ if (len != sizeof(struct ip6t_getinfo)) {
103787+ duprintf("length %u != %zu\n", len,
103788 sizeof(struct ip6t_getinfo));
103789 return -EINVAL;
103790 }
103791@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103792 info.size = private->size;
103793 strcpy(info.name, name);
103794
103795- if (copy_to_user(user, &info, *len) != 0)
103796+ if (copy_to_user(user, &info, len) != 0)
103797 ret = -EFAULT;
103798 else
103799 ret = 0;
103800@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103801
103802 switch (cmd) {
103803 case IP6T_SO_GET_INFO:
103804- ret = get_info(sock_net(sk), user, len, 1);
103805+ ret = get_info(sock_net(sk), user, *len, 1);
103806 break;
103807 case IP6T_SO_GET_ENTRIES:
103808 ret = compat_get_entries(sock_net(sk), user, len);
103809@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103810
103811 switch (cmd) {
103812 case IP6T_SO_GET_INFO:
103813- ret = get_info(sock_net(sk), user, len, 0);
103814+ ret = get_info(sock_net(sk), user, *len, 0);
103815 break;
103816
103817 case IP6T_SO_GET_ENTRIES:
103818diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103819index 6f187c8..34b367f 100644
103820--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103821+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103822@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103823
103824 static int nf_ct_frag6_sysctl_register(struct net *net)
103825 {
103826- struct ctl_table *table;
103827+ ctl_table_no_const *table = NULL;
103828 struct ctl_table_header *hdr;
103829
103830- table = nf_ct_frag6_sysctl_table;
103831 if (!net_eq(net, &init_net)) {
103832- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103833+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103834 GFP_KERNEL);
103835 if (table == NULL)
103836 goto err_alloc;
103837@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103838 table[2].data = &net->nf_frag.frags.high_thresh;
103839 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103840 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103841- }
103842-
103843- hdr = register_net_sysctl(net, "net/netfilter", table);
103844+ hdr = register_net_sysctl(net, "net/netfilter", table);
103845+ } else
103846+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103847 if (hdr == NULL)
103848 goto err_reg;
103849
103850@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103851 return 0;
103852
103853 err_reg:
103854- if (!net_eq(net, &init_net))
103855- kfree(table);
103856+ kfree(table);
103857 err_alloc:
103858 return -ENOMEM;
103859 }
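
The nf_ct_frag6 hunk above is the first of several identical conversions in this patch (ip6_frags, xfrm6, and the conntrack sysctls further down): the file-scope template table is left read-only, and only non-init network namespaces receive a writable kmemdup() copy whose .data pointers are retargeted before registration. A self-contained sketch of the idiom, assuming a grsecurity tree where ctl_table_no_const is the writable counterpart of struct ctl_table; the template name, sysctl path, and data pointer below are illustrative only, not taken from the patch:

	/*
	 * Sketch of the per-netns sysctl idiom used above. Assumed
	 * grsecurity environment: ctl_table_no_const is a ctl_table
	 * whose instances may be written after boot.
	 */
	static struct ctl_table example_sysctl_template[] = {
		{
			.procname	= "example_thresh",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static int example_sysctl_register(struct net *net)
	{
		ctl_table_no_const *table = NULL;
		struct ctl_table_header *hdr;

		if (!net_eq(net, &init_net)) {
			/* non-init netns: patch per-netns data into a writable copy */
			table = kmemdup(example_sysctl_template,
					sizeof(example_sysctl_template), GFP_KERNEL);
			if (table == NULL)
				return -ENOMEM;
			table[0].data = &net->core.sysctl_somaxconn; /* illustrative */
			hdr = register_net_sysctl(net, "net/example", table);
		} else
			hdr = register_net_sysctl(net, "net/example",
						  example_sysctl_template);

		if (hdr == NULL) {
			kfree(table);	/* NULL for init_net, so kfree() is a no-op */
			return -ENOMEM;
		}
		return 0;
	}
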
103860diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103861index fe7e3e4..47aba96 100644
103862--- a/net/ipv6/ping.c
103863+++ b/net/ipv6/ping.c
103864@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103865 };
103866 #endif
103867
103868+static struct pingv6_ops real_pingv6_ops = {
103869+ .ipv6_recv_error = ipv6_recv_error,
103870+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103871+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103872+ .icmpv6_err_convert = icmpv6_err_convert,
103873+ .ipv6_icmp_error = ipv6_icmp_error,
103874+ .ipv6_chk_addr = ipv6_chk_addr,
103875+};
103876+
103877+static struct pingv6_ops dummy_pingv6_ops = {
103878+ .ipv6_recv_error = dummy_ipv6_recv_error,
103879+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103880+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103881+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103882+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103883+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103884+};
103885+
103886 int __init pingv6_init(void)
103887 {
103888 #ifdef CONFIG_PROC_FS
103889@@ -249,13 +267,7 @@ int __init pingv6_init(void)
103890 if (ret)
103891 return ret;
103892 #endif
103893- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103894- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103895- pingv6_ops.ip6_datagram_recv_specific_ctl =
103896- ip6_datagram_recv_specific_ctl;
103897- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103898- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103899- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103900+ pingv6_ops = &real_pingv6_ops;
103901 return inet6_register_protosw(&pingv6_protosw);
103902 }
103903
103904@@ -264,14 +276,9 @@ int __init pingv6_init(void)
103905 */
103906 void pingv6_exit(void)
103907 {
103908- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103909- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103910- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103911- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103912- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103913- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103914 #ifdef CONFIG_PROC_FS
103915 unregister_pernet_subsys(&ping_v6_net_ops);
103916 #endif
103917+ pingv6_ops = &dummy_pingv6_ops;
103918 inet6_unregister_protosw(&pingv6_protosw);
103919 }
103920diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103921index 679253d0..70b653c 100644
103922--- a/net/ipv6/proc.c
103923+++ b/net/ipv6/proc.c
103924@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103925 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103926 goto proc_snmp6_fail;
103927
103928- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103929+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103930 if (!net->mib.proc_net_devsnmp6)
103931 goto proc_dev_snmp6_fail;
103932 return 0;
103933diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103934index ee25631..3c3ac5d 100644
103935--- a/net/ipv6/raw.c
103936+++ b/net/ipv6/raw.c
103937@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103938 {
103939 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103940 skb_checksum_complete(skb)) {
103941- atomic_inc(&sk->sk_drops);
103942+ atomic_inc_unchecked(&sk->sk_drops);
103943 kfree_skb(skb);
103944 return NET_RX_DROP;
103945 }
103946@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103947 struct raw6_sock *rp = raw6_sk(sk);
103948
103949 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103950- atomic_inc(&sk->sk_drops);
103951+ atomic_inc_unchecked(&sk->sk_drops);
103952 kfree_skb(skb);
103953 return NET_RX_DROP;
103954 }
103955@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103956
103957 if (inet->hdrincl) {
103958 if (skb_checksum_complete(skb)) {
103959- atomic_inc(&sk->sk_drops);
103960+ atomic_inc_unchecked(&sk->sk_drops);
103961 kfree_skb(skb);
103962 return NET_RX_DROP;
103963 }
103964@@ -609,7 +609,7 @@ out:
103965 return err;
103966 }
103967
103968-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103969+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103970 struct flowi6 *fl6, struct dst_entry **dstp,
103971 unsigned int flags)
103972 {
103973@@ -916,12 +916,15 @@ do_confirm:
103974 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103975 char __user *optval, int optlen)
103976 {
103977+ struct icmp6_filter filter;
103978+
103979 switch (optname) {
103980 case ICMPV6_FILTER:
103981 if (optlen > sizeof(struct icmp6_filter))
103982 optlen = sizeof(struct icmp6_filter);
103983- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103984+ if (copy_from_user(&filter, optval, optlen))
103985 return -EFAULT;
103986+ raw6_sk(sk)->filter = filter;
103987 return 0;
103988 default:
103989 return -ENOPROTOOPT;
103990@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103991 char __user *optval, int __user *optlen)
103992 {
103993 int len;
103994+ struct icmp6_filter filter;
103995
103996 switch (optname) {
103997 case ICMPV6_FILTER:
103998@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103999 len = sizeof(struct icmp6_filter);
104000 if (put_user(len, optlen))
104001 return -EFAULT;
104002- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104003+ filter = raw6_sk(sk)->filter;
104004+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104005 return -EFAULT;
104006 return 0;
104007 default:
104008diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104009index d7d70e6..bd5e9fc 100644
104010--- a/net/ipv6/reassembly.c
104011+++ b/net/ipv6/reassembly.c
104012@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104013
104014 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104015 {
104016- struct ctl_table *table;
104017+ ctl_table_no_const *table = NULL;
104018 struct ctl_table_header *hdr;
104019
104020- table = ip6_frags_ns_ctl_table;
104021 if (!net_eq(net, &init_net)) {
104022- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104023+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104024 if (table == NULL)
104025 goto err_alloc;
104026
104027@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104028 /* Don't export sysctls to unprivileged users */
104029 if (net->user_ns != &init_user_ns)
104030 table[0].procname = NULL;
104031- }
104032+ hdr = register_net_sysctl(net, "net/ipv6", table);
104033+ } else
104034+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104035
104036- hdr = register_net_sysctl(net, "net/ipv6", table);
104037 if (hdr == NULL)
104038 goto err_reg;
104039
104040@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104041 return 0;
104042
104043 err_reg:
104044- if (!net_eq(net, &init_net))
104045- kfree(table);
104046+ kfree(table);
104047 err_alloc:
104048 return -ENOMEM;
104049 }
104050diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104051index 1528d84..f393960 100644
104052--- a/net/ipv6/route.c
104053+++ b/net/ipv6/route.c
104054@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
104055
104056 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104057 {
104058- struct ctl_table *table;
104059+ ctl_table_no_const *table;
104060
104061 table = kmemdup(ipv6_route_table_template,
104062 sizeof(ipv6_route_table_template),
104063diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104064index cdbfe5a..e13eb31 100644
104065--- a/net/ipv6/sit.c
104066+++ b/net/ipv6/sit.c
104067@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104068 static void ipip6_dev_free(struct net_device *dev);
104069 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104070 __be32 *v4dst);
104071-static struct rtnl_link_ops sit_link_ops __read_mostly;
104072+static struct rtnl_link_ops sit_link_ops;
104073
104074 static int sit_net_id __read_mostly;
104075 struct sit_net {
104076@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104077 unregister_netdevice_queue(dev, head);
104078 }
104079
104080-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104081+static struct rtnl_link_ops sit_link_ops = {
104082 .kind = "sit",
104083 .maxtype = IFLA_IPTUN_MAX,
104084 .policy = ipip6_policy,
104085diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104086index c5c10fa..2577d51 100644
104087--- a/net/ipv6/sysctl_net_ipv6.c
104088+++ b/net/ipv6/sysctl_net_ipv6.c
104089@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104090
104091 static int __net_init ipv6_sysctl_net_init(struct net *net)
104092 {
104093- struct ctl_table *ipv6_table;
104094+ ctl_table_no_const *ipv6_table;
104095 struct ctl_table *ipv6_route_table;
104096 struct ctl_table *ipv6_icmp_table;
104097 int err;
104098diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104099index 9c0b54e..5e7bd8f 100644
104100--- a/net/ipv6/tcp_ipv6.c
104101+++ b/net/ipv6/tcp_ipv6.c
104102@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104103 }
104104 }
104105
104106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104107+extern int grsec_enable_blackhole;
104108+#endif
104109+
104110 static void tcp_v6_hash(struct sock *sk)
104111 {
104112 if (sk->sk_state != TCP_CLOSE) {
104113@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104114 return 0;
104115
104116 reset:
104117+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104118+ if (!grsec_enable_blackhole)
104119+#endif
104120 tcp_v6_send_reset(sk, skb);
104121 discard:
104122 if (opt_skb)
104123@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104124
104125 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104126 inet6_iif(skb));
104127- if (!sk)
104128+ if (!sk) {
104129+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104130+ ret = 1;
104131+#endif
104132 goto no_tcp_socket;
104133+ }
104134
104135 process:
104136- if (sk->sk_state == TCP_TIME_WAIT)
104137+ if (sk->sk_state == TCP_TIME_WAIT) {
104138+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104139+ ret = 2;
104140+#endif
104141 goto do_time_wait;
104142+ }
104143
104144 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104145 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104146@@ -1499,6 +1514,10 @@ csum_error:
104147 bad_packet:
104148 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104149 } else {
104150+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104151+ if (!grsec_enable_blackhole || (ret == 1 &&
104152+ (skb->dev->flags & IFF_LOOPBACK)))
104153+#endif
104154 tcp_v6_send_reset(NULL, skb);
104155 }
104156
104157diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104158index 189dc4a..458bec0 100644
104159--- a/net/ipv6/udp.c
104160+++ b/net/ipv6/udp.c
104161@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104162 udp_ipv6_hash_secret + net_hash_mix(net));
104163 }
104164
104165+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104166+extern int grsec_enable_blackhole;
104167+#endif
104168+
104169 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104170 {
104171 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104172@@ -448,7 +452,7 @@ try_again:
104173 if (unlikely(err)) {
104174 trace_kfree_skb(skb, udpv6_recvmsg);
104175 if (!peeked) {
104176- atomic_inc(&sk->sk_drops);
104177+ atomic_inc_unchecked(&sk->sk_drops);
104178 if (is_udp4)
104179 UDP_INC_STATS_USER(sock_net(sk),
104180 UDP_MIB_INERRORS,
104181@@ -714,7 +718,7 @@ csum_error:
104182 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104183 drop:
104184 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104185- atomic_inc(&sk->sk_drops);
104186+ atomic_inc_unchecked(&sk->sk_drops);
104187 kfree_skb(skb);
104188 return -1;
104189 }
104190@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104191 if (likely(skb1 == NULL))
104192 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104193 if (!skb1) {
104194- atomic_inc(&sk->sk_drops);
104195+ atomic_inc_unchecked(&sk->sk_drops);
104196 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104197 IS_UDPLITE(sk));
104198 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104199@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104200 goto csum_error;
104201
104202 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104203+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104204+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104205+#endif
104206 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104207
104208 kfree_skb(skb);
104209diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104210index 48bf5a0..691985a 100644
104211--- a/net/ipv6/xfrm6_policy.c
104212+++ b/net/ipv6/xfrm6_policy.c
104213@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104214 }
104215 }
104216
104217-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104218+static int xfrm6_garbage_collect(struct dst_ops *ops)
104219 {
104220 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104221
104222- xfrm6_policy_afinfo.garbage_collect(net);
104223+ xfrm_garbage_collect_deferred(net);
104224 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104225 }
104226
104227@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104228
104229 static int __net_init xfrm6_net_init(struct net *net)
104230 {
104231- struct ctl_table *table;
104232+ ctl_table_no_const *table = NULL;
104233 struct ctl_table_header *hdr;
104234
104235- table = xfrm6_policy_table;
104236 if (!net_eq(net, &init_net)) {
104237- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104238+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104239 if (!table)
104240 goto err_alloc;
104241
104242 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104243- }
104244+ hdr = register_net_sysctl(net, "net/ipv6", table);
104245+ } else
104246+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104247
104248- hdr = register_net_sysctl(net, "net/ipv6", table);
104249 if (!hdr)
104250 goto err_reg;
104251
104252@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104253 return 0;
104254
104255 err_reg:
104256- if (!net_eq(net, &init_net))
104257- kfree(table);
104258+ kfree(table);
104259 err_alloc:
104260 return -ENOMEM;
104261 }
104262diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104263index c1d247e..9e5949d 100644
104264--- a/net/ipx/ipx_proc.c
104265+++ b/net/ipx/ipx_proc.c
104266@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104267 struct proc_dir_entry *p;
104268 int rc = -ENOMEM;
104269
104270- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104271+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104272
104273 if (!ipx_proc_dir)
104274 goto out;
104275diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104276index 4efe486..dee966e 100644
104277--- a/net/irda/ircomm/ircomm_tty.c
104278+++ b/net/irda/ircomm/ircomm_tty.c
104279@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104280 add_wait_queue(&port->open_wait, &wait);
104281
104282 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104283- __FILE__, __LINE__, tty->driver->name, port->count);
104284+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104285
104286 spin_lock_irqsave(&port->lock, flags);
104287- port->count--;
104288+ atomic_dec(&port->count);
104289 port->blocked_open++;
104290 spin_unlock_irqrestore(&port->lock, flags);
104291
104292@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104293 }
104294
104295 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104296- __FILE__, __LINE__, tty->driver->name, port->count);
104297+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104298
104299 schedule();
104300 }
104301@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104302
104303 spin_lock_irqsave(&port->lock, flags);
104304 if (!tty_hung_up_p(filp))
104305- port->count++;
104306+ atomic_inc(&port->count);
104307 port->blocked_open--;
104308 spin_unlock_irqrestore(&port->lock, flags);
104309
104310 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104311- __FILE__, __LINE__, tty->driver->name, port->count);
104312+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104313
104314 if (!retval)
104315 port->flags |= ASYNC_NORMAL_ACTIVE;
104316@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104317
104318 /* ++ is not atomic, so this should be protected - Jean II */
104319 spin_lock_irqsave(&self->port.lock, flags);
104320- self->port.count++;
104321+ atomic_inc(&self->port.count);
104322 spin_unlock_irqrestore(&self->port.lock, flags);
104323 tty_port_tty_set(&self->port, tty);
104324
104325 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104326- self->line, self->port.count);
104327+ self->line, atomic_read(&self->port.count));
104328
104329 /* Not really used by us, but lets do it anyway */
104330 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104331@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104332 tty_kref_put(port->tty);
104333 }
104334 port->tty = NULL;
104335- port->count = 0;
104336+ atomic_set(&port->count, 0);
104337 spin_unlock_irqrestore(&port->lock, flags);
104338
104339 wake_up_interruptible(&port->open_wait);
104340@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104341 seq_putc(m, '\n');
104342
104343 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104344- seq_printf(m, "Open count: %d\n", self->port.count);
104345+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104346 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104347 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104348
104349diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104350index b9ac598..f88cc56 100644
104351--- a/net/irda/irproc.c
104352+++ b/net/irda/irproc.c
104353@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104354 {
104355 int i;
104356
104357- proc_irda = proc_mkdir("irda", init_net.proc_net);
104358+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104359 if (proc_irda == NULL)
104360 return;
104361
104362diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104363index 2e9953b..ed06350 100644
104364--- a/net/iucv/af_iucv.c
104365+++ b/net/iucv/af_iucv.c
104366@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104367 {
104368 char name[12];
104369
104370- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104371+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104372 while (__iucv_get_sock_by_name(name)) {
104373 sprintf(name, "%08x",
104374- atomic_inc_return(&iucv_sk_list.autobind_name));
104375+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104376 }
104377 memcpy(iucv->src_name, name, 8);
104378 }
104379diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104380index 2a6a1fd..6c112b0 100644
104381--- a/net/iucv/iucv.c
104382+++ b/net/iucv/iucv.c
104383@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104384 return NOTIFY_OK;
104385 }
104386
104387-static struct notifier_block __refdata iucv_cpu_notifier = {
104388+static struct notifier_block iucv_cpu_notifier = {
104389 .notifier_call = iucv_cpu_notify,
104390 };
104391
104392diff --git a/net/key/af_key.c b/net/key/af_key.c
104393index f8ac939..1e189bf 100644
104394--- a/net/key/af_key.c
104395+++ b/net/key/af_key.c
104396@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104397 static u32 get_acqseq(void)
104398 {
104399 u32 res;
104400- static atomic_t acqseq;
104401+ static atomic_unchecked_t acqseq;
104402
104403 do {
104404- res = atomic_inc_return(&acqseq);
104405+ res = atomic_inc_return_unchecked(&acqseq);
104406 } while (!res);
104407 return res;
104408 }
104409diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104410index 781b3a2..73a7434 100644
104411--- a/net/l2tp/l2tp_eth.c
104412+++ b/net/l2tp/l2tp_eth.c
104413@@ -42,12 +42,12 @@ struct l2tp_eth {
104414 struct sock *tunnel_sock;
104415 struct l2tp_session *session;
104416 struct list_head list;
104417- atomic_long_t tx_bytes;
104418- atomic_long_t tx_packets;
104419- atomic_long_t tx_dropped;
104420- atomic_long_t rx_bytes;
104421- atomic_long_t rx_packets;
104422- atomic_long_t rx_errors;
104423+ atomic_long_unchecked_t tx_bytes;
104424+ atomic_long_unchecked_t tx_packets;
104425+ atomic_long_unchecked_t tx_dropped;
104426+ atomic_long_unchecked_t rx_bytes;
104427+ atomic_long_unchecked_t rx_packets;
104428+ atomic_long_unchecked_t rx_errors;
104429 };
104430
104431 /* via l2tp_session_priv() */
104432@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104433 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104434
104435 if (likely(ret == NET_XMIT_SUCCESS)) {
104436- atomic_long_add(len, &priv->tx_bytes);
104437- atomic_long_inc(&priv->tx_packets);
104438+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104439+ atomic_long_inc_unchecked(&priv->tx_packets);
104440 } else {
104441- atomic_long_inc(&priv->tx_dropped);
104442+ atomic_long_inc_unchecked(&priv->tx_dropped);
104443 }
104444 return NETDEV_TX_OK;
104445 }
104446@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104447 {
104448 struct l2tp_eth *priv = netdev_priv(dev);
104449
104450- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104451- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104452- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104453- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104454- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104455- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104456+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104457+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104458+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104459+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104460+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104461+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104462 return stats;
104463 }
104464
104465@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104466 nf_reset(skb);
104467
104468 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104469- atomic_long_inc(&priv->rx_packets);
104470- atomic_long_add(data_len, &priv->rx_bytes);
104471+ atomic_long_inc_unchecked(&priv->rx_packets);
104472+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104473 } else {
104474- atomic_long_inc(&priv->rx_errors);
104475+ atomic_long_inc_unchecked(&priv->rx_errors);
104476 }
104477 return;
104478
104479 error:
104480- atomic_long_inc(&priv->rx_errors);
104481+ atomic_long_inc_unchecked(&priv->rx_errors);
104482 kfree_skb(skb);
104483 }
104484
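The l2tp counters above, like the sk_drops and in_pkts conversions earlier in this patch, are plain statistics that may wrap without harm; switching them to the *_unchecked types exempts them from PaX REFCOUNT overflow detection, so that only true reference counts trap on overflow. On a kernel without that feature the unchecked variants are expected to collapse back to the ordinary atomics, roughly as in this sketch (an assumed mapping for illustration; the real definitions live in the grsecurity atomic headers, not in any hunk shown here):

	#ifndef CONFIG_PAX_REFCOUNT
	/* fallback: unchecked counters degrade to the plain atomics */
	typedef atomic_t atomic_unchecked_t;
	typedef atomic_long_t atomic_long_unchecked_t;

	#define atomic_inc_unchecked(v)			atomic_inc(v)
	#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
	#define atomic_long_inc_unchecked(v)		atomic_long_inc(v)
	#define atomic_long_add_unchecked(i, v)		atomic_long_add(i, v)
	#define atomic_long_read_unchecked(v)		atomic_long_read(v)
	#endif
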
104485diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104486index 1a3c7e0..80f8b0c 100644
104487--- a/net/llc/llc_proc.c
104488+++ b/net/llc/llc_proc.c
104489@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104490 int rc = -ENOMEM;
104491 struct proc_dir_entry *p;
104492
104493- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104494+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104495 if (!llc_proc_dir)
104496 goto out;
104497
104498diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104499index e75d5c5..429fc95 100644
104500--- a/net/mac80211/cfg.c
104501+++ b/net/mac80211/cfg.c
104502@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104503 ret = ieee80211_vif_use_channel(sdata, chandef,
104504 IEEE80211_CHANCTX_EXCLUSIVE);
104505 }
104506- } else if (local->open_count == local->monitors) {
104507+ } else if (local_read(&local->open_count) == local->monitors) {
104508 local->_oper_chandef = *chandef;
104509 ieee80211_hw_config(local, 0);
104510 }
104511@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104512 else
104513 local->probe_req_reg--;
104514
104515- if (!local->open_count)
104516+ if (!local_read(&local->open_count))
104517 break;
104518
104519 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104520@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104521 if (chanctx_conf) {
104522 *chandef = sdata->vif.bss_conf.chandef;
104523 ret = 0;
104524- } else if (local->open_count > 0 &&
104525- local->open_count == local->monitors &&
104526+ } else if (local_read(&local->open_count) > 0 &&
104527+ local_read(&local->open_count) == local->monitors &&
104528 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104529 if (local->use_chanctx)
104530 *chandef = local->monitor_chandef;
104531diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104532index cc6e964..029a3a3 100644
104533--- a/net/mac80211/ieee80211_i.h
104534+++ b/net/mac80211/ieee80211_i.h
104535@@ -29,6 +29,7 @@
104536 #include <net/ieee80211_radiotap.h>
104537 #include <net/cfg80211.h>
104538 #include <net/mac80211.h>
104539+#include <asm/local.h>
104540 #include "key.h"
104541 #include "sta_info.h"
104542 #include "debug.h"
104543@@ -1114,7 +1115,7 @@ struct ieee80211_local {
104544 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104545 spinlock_t queue_stop_reason_lock;
104546
104547- int open_count;
104548+ local_t open_count;
104549 int monitors, cooked_mntrs;
104550 /* number of interfaces with corresponding FIF_ flags */
104551 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104552diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104553index 4173553..e3b5a3f 100644
104554--- a/net/mac80211/iface.c
104555+++ b/net/mac80211/iface.c
104556@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104557 break;
104558 }
104559
104560- if (local->open_count == 0) {
104561+ if (local_read(&local->open_count) == 0) {
104562 res = drv_start(local);
104563 if (res)
104564 goto err_del_bss;
104565@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104566 res = drv_add_interface(local, sdata);
104567 if (res)
104568 goto err_stop;
104569- } else if (local->monitors == 0 && local->open_count == 0) {
104570+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104571 res = ieee80211_add_virtual_monitor(local);
104572 if (res)
104573 goto err_stop;
104574@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104575 atomic_inc(&local->iff_promiscs);
104576
104577 if (coming_up)
104578- local->open_count++;
104579+ local_inc(&local->open_count);
104580
104581 if (hw_reconf_flags)
104582 ieee80211_hw_config(local, hw_reconf_flags);
104583@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104584 err_del_interface:
104585 drv_remove_interface(local, sdata);
104586 err_stop:
104587- if (!local->open_count)
104588+ if (!local_read(&local->open_count))
104589 drv_stop(local);
104590 err_del_bss:
104591 sdata->bss = NULL;
104592@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104593 }
104594
104595 if (going_down)
104596- local->open_count--;
104597+ local_dec(&local->open_count);
104598
104599 switch (sdata->vif.type) {
104600 case NL80211_IFTYPE_AP_VLAN:
104601@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104602 }
104603 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104604
104605- if (local->open_count == 0)
104606+ if (local_read(&local->open_count) == 0)
104607 ieee80211_clear_tx_pending(local);
104608
104609 /*
104610@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104611 if (cancel_scan)
104612 flush_delayed_work(&local->scan_work);
104613
104614- if (local->open_count == 0) {
104615+ if (local_read(&local->open_count) == 0) {
104616 ieee80211_stop_device(local);
104617
104618 /* no reconfiguring after stop! */
104619@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104620 ieee80211_configure_filter(local);
104621 ieee80211_hw_config(local, hw_reconf_flags);
104622
104623- if (local->monitors == local->open_count)
104624+ if (local->monitors == local_read(&local->open_count))
104625 ieee80211_add_virtual_monitor(local);
104626 }
104627
104628diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104629index 6ab99da..f9502d4 100644
104630--- a/net/mac80211/main.c
104631+++ b/net/mac80211/main.c
104632@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104633 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104634 IEEE80211_CONF_CHANGE_POWER);
104635
104636- if (changed && local->open_count) {
104637+ if (changed && local_read(&local->open_count)) {
104638 ret = drv_config(local, changed);
104639 /*
104640 * Goal:
104641diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104642index 4a95fe3..0bfd713 100644
104643--- a/net/mac80211/pm.c
104644+++ b/net/mac80211/pm.c
104645@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104646 struct ieee80211_sub_if_data *sdata;
104647 struct sta_info *sta;
104648
104649- if (!local->open_count)
104650+ if (!local_read(&local->open_count))
104651 goto suspend;
104652
104653 ieee80211_scan_cancel(local);
104654@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104655 cancel_work_sync(&local->dynamic_ps_enable_work);
104656 del_timer_sync(&local->dynamic_ps_timer);
104657
104658- local->wowlan = wowlan && local->open_count;
104659+ local->wowlan = wowlan && local_read(&local->open_count);
104660 if (local->wowlan) {
104661 int err = drv_suspend(local, wowlan);
104662 if (err < 0) {
104663@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104664 WARN_ON(!list_empty(&local->chanctx_list));
104665
104666 /* stop hardware - this must stop RX */
104667- if (local->open_count)
104668+ if (local_read(&local->open_count))
104669 ieee80211_stop_device(local);
104670
104671 suspend:
104672diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104673index d53355b..21f583a 100644
104674--- a/net/mac80211/rate.c
104675+++ b/net/mac80211/rate.c
104676@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104677
104678 ASSERT_RTNL();
104679
104680- if (local->open_count)
104681+ if (local_read(&local->open_count))
104682 return -EBUSY;
104683
104684 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104685diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104686index 974ebe7..57bcd3c 100644
104687--- a/net/mac80211/util.c
104688+++ b/net/mac80211/util.c
104689@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104690 }
104691 #endif
104692 /* everything else happens only if HW was up & running */
104693- if (!local->open_count)
104694+ if (!local_read(&local->open_count))
104695 goto wake_up;
104696
104697 /*
104698@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104699 local->in_reconfig = false;
104700 barrier();
104701
104702- if (local->monitors == local->open_count && local->monitors > 0)
104703+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104704 ieee80211_add_virtual_monitor(local);
104705
104706 /*
104707diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104708index b02660f..c0f791c 100644
104709--- a/net/netfilter/Kconfig
104710+++ b/net/netfilter/Kconfig
104711@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104712
104713 To compile it as a module, choose M here. If unsure, say N.
104714
104715+config NETFILTER_XT_MATCH_GRADM
104716+ tristate '"gradm" match support'
104717+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104718+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104719+ ---help---
104720+	  The gradm match allows matching on whether grsecurity RBAC is
104721+	  enabled.  It is useful when iptables rules are applied early
104722+	  during boot to block connections to the machine (except from a
104723+	  trusted host) while the RBAC system is still disabled.
104724+
104725 config NETFILTER_XT_MATCH_HASHLIMIT
104726 tristate '"hashlimit" match support'
104727 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
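
The Kconfig entry above and the Makefile hunk just below wire in net/netfilter/xt_gradm.c, the new match module added at the end of this patch (its body is truncated in this excerpt). For orientation, a minimal sketch of the shape a 3.19-era xtables match module takes; the matchinfo layout, the invert bit, and the stubbed RBAC query are assumptions, not the contents of the real file:

	/*
	 * Minimal xtables match skeleton, for orientation only.
	 * The real xt_gradm.c is the authoritative version.
	 */
	#include <linux/module.h>
	#include <linux/skbuff.h>
	#include <linux/netfilter/x_tables.h>

	struct xt_gradm_mtinfo {
		__u16 flags;
		__u16 invflags;	/* bit 0: invert the match (assumed) */
	};

	static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
	{
		const struct xt_gradm_mtinfo *info = par->matchinfo;
		bool rbac_enabled = false;	/* stub: would query gr_acl_is_enabled() */

		return rbac_enabled ^ !!(info->invflags & 1);
	}

	static struct xt_match gradm_mt_reg __read_mostly = {
		.name		= "gradm",
		.revision	= 0,
		.family		= NFPROTO_UNSPEC,
		.match		= gradm_mt,
		.matchsize	= sizeof(struct xt_gradm_mtinfo),
		.me		= THIS_MODULE,
	};

	static int __init gradm_mt_init(void)
	{
		return xt_register_match(&gradm_mt_reg);
	}

	static void __exit gradm_mt_exit(void)
	{
		xt_unregister_match(&gradm_mt_reg);
	}

	module_init(gradm_mt_init);
	module_exit(gradm_mt_exit);
	MODULE_LICENSE("GPL");
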
104728diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104729index 89f73a9..e4e5bd9 100644
104730--- a/net/netfilter/Makefile
104731+++ b/net/netfilter/Makefile
104732@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104733 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104734 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104735 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104736+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104737 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104738 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104739 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104740diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104741index d259da3..6a32b2c 100644
104742--- a/net/netfilter/ipset/ip_set_core.c
104743+++ b/net/netfilter/ipset/ip_set_core.c
104744@@ -1952,7 +1952,7 @@ done:
104745 return ret;
104746 }
104747
104748-static struct nf_sockopt_ops so_set __read_mostly = {
104749+static struct nf_sockopt_ops so_set = {
104750 .pf = PF_INET,
104751 .get_optmin = SO_IP_SET,
104752 .get_optmax = SO_IP_SET + 1,
104753diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104754index b0f7b62..0541842 100644
104755--- a/net/netfilter/ipvs/ip_vs_conn.c
104756+++ b/net/netfilter/ipvs/ip_vs_conn.c
104757@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104758 /* Increase the refcnt counter of the dest */
104759 ip_vs_dest_hold(dest);
104760
104761- conn_flags = atomic_read(&dest->conn_flags);
104762+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104763 if (cp->protocol != IPPROTO_UDP)
104764 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104765 flags = cp->flags;
104766@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104767
104768 cp->control = NULL;
104769 atomic_set(&cp->n_control, 0);
104770- atomic_set(&cp->in_pkts, 0);
104771+ atomic_set_unchecked(&cp->in_pkts, 0);
104772
104773 cp->packet_xmit = NULL;
104774 cp->app = NULL;
104775@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104776
104777 /* Don't drop the entry if its number of incoming packets is not
104778 located in [0, 8] */
104779- i = atomic_read(&cp->in_pkts);
104780+ i = atomic_read_unchecked(&cp->in_pkts);
104781 if (i > 8 || i < 0) return 0;
104782
104783 if (!todrop_rate[i]) return 0;
104784diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104785index b87ca32..76c7799 100644
104786--- a/net/netfilter/ipvs/ip_vs_core.c
104787+++ b/net/netfilter/ipvs/ip_vs_core.c
104788@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104789 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104790 /* do not touch skb anymore */
104791
104792- atomic_inc(&cp->in_pkts);
104793+ atomic_inc_unchecked(&cp->in_pkts);
104794 ip_vs_conn_put(cp);
104795 return ret;
104796 }
104797@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104798 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104799 pkts = sysctl_sync_threshold(ipvs);
104800 else
104801- pkts = atomic_add_return(1, &cp->in_pkts);
104802+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104803
104804 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104805 ip_vs_sync_conn(net, cp, pkts);
104806diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104807index fdcda8b..dbc1979 100644
104808--- a/net/netfilter/ipvs/ip_vs_ctl.c
104809+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104810@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104811 */
104812 ip_vs_rs_hash(ipvs, dest);
104813 }
104814- atomic_set(&dest->conn_flags, conn_flags);
104815+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104816
104817 /* bind the service */
104818 old_svc = rcu_dereference_protected(dest->svc, 1);
104819@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104820 * align with netns init in ip_vs_control_net_init()
104821 */
104822
104823-static struct ctl_table vs_vars[] = {
104824+static ctl_table_no_const vs_vars[] __read_only = {
104825 {
104826 .procname = "amemthresh",
104827 .maxlen = sizeof(int),
104828@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104829 " %-7s %-6d %-10d %-10d\n",
104830 &dest->addr.in6,
104831 ntohs(dest->port),
104832- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104833+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104834 atomic_read(&dest->weight),
104835 atomic_read(&dest->activeconns),
104836 atomic_read(&dest->inactconns));
104837@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104838 "%-7s %-6d %-10d %-10d\n",
104839 ntohl(dest->addr.ip),
104840 ntohs(dest->port),
104841- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104842+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104843 atomic_read(&dest->weight),
104844 atomic_read(&dest->activeconns),
104845 atomic_read(&dest->inactconns));
104846@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104847
104848 entry.addr = dest->addr.ip;
104849 entry.port = dest->port;
104850- entry.conn_flags = atomic_read(&dest->conn_flags);
104851+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104852 entry.weight = atomic_read(&dest->weight);
104853 entry.u_threshold = dest->u_threshold;
104854 entry.l_threshold = dest->l_threshold;
104855@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104856 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104857 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104858 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104859- (atomic_read(&dest->conn_flags) &
104860+ (atomic_read_unchecked(&dest->conn_flags) &
104861 IP_VS_CONN_F_FWD_MASK)) ||
104862 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104863 atomic_read(&dest->weight)) ||
104864@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104865 {
104866 int idx;
104867 struct netns_ipvs *ipvs = net_ipvs(net);
104868- struct ctl_table *tbl;
104869+ ctl_table_no_const *tbl;
104870
104871 atomic_set(&ipvs->dropentry, 0);
104872 spin_lock_init(&ipvs->dropentry_lock);
104873diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104874index 127f140..553d652 100644
104875--- a/net/netfilter/ipvs/ip_vs_lblc.c
104876+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104877@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104878 * IPVS LBLC sysctl table
104879 */
104880 #ifdef CONFIG_SYSCTL
104881-static struct ctl_table vs_vars_table[] = {
104882+static ctl_table_no_const vs_vars_table[] __read_only = {
104883 {
104884 .procname = "lblc_expiration",
104885 .data = NULL,
104886diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104887index 2229d2d..b32b785 100644
104888--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104889+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104890@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104891 * IPVS LBLCR sysctl table
104892 */
104893
104894-static struct ctl_table vs_vars_table[] = {
104895+static ctl_table_no_const vs_vars_table[] __read_only = {
104896 {
104897 .procname = "lblcr_expiration",
104898 .data = NULL,
104899diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104900index d93ceeb..4556144 100644
104901--- a/net/netfilter/ipvs/ip_vs_sync.c
104902+++ b/net/netfilter/ipvs/ip_vs_sync.c
104903@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104904 cp = cp->control;
104905 if (cp) {
104906 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104907- pkts = atomic_add_return(1, &cp->in_pkts);
104908+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104909 else
104910 pkts = sysctl_sync_threshold(ipvs);
104911 ip_vs_sync_conn(net, cp->control, pkts);
104912@@ -771,7 +771,7 @@ control:
104913 if (!cp)
104914 return;
104915 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104916- pkts = atomic_add_return(1, &cp->in_pkts);
104917+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104918 else
104919 pkts = sysctl_sync_threshold(ipvs);
104920 goto sloop;
104921@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104922
104923 if (opt)
104924 memcpy(&cp->in_seq, opt, sizeof(*opt));
104925- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104926+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104927 cp->state = state;
104928 cp->old_state = cp->state;
104929 /*
104930diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104931index 3aedbda..6a63567 100644
104932--- a/net/netfilter/ipvs/ip_vs_xmit.c
104933+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104934@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104935 else
104936 rc = NF_ACCEPT;
104937 /* do not touch skb anymore */
104938- atomic_inc(&cp->in_pkts);
104939+ atomic_inc_unchecked(&cp->in_pkts);
104940 goto out;
104941 }
104942
104943@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
104944 else
104945 rc = NF_ACCEPT;
104946 /* do not touch skb anymore */
104947- atomic_inc(&cp->in_pkts);
104948+ atomic_inc_unchecked(&cp->in_pkts);
104949 goto out;
104950 }
104951
104952diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
104953index a4b5e2a..13b1de3 100644
104954--- a/net/netfilter/nf_conntrack_acct.c
104955+++ b/net/netfilter/nf_conntrack_acct.c
104956@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
104957 #ifdef CONFIG_SYSCTL
104958 static int nf_conntrack_acct_init_sysctl(struct net *net)
104959 {
104960- struct ctl_table *table;
104961+ ctl_table_no_const *table;
104962
104963 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
104964 GFP_KERNEL);
104965diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
104966index 46d1b26..b7f3b76 100644
104967--- a/net/netfilter/nf_conntrack_core.c
104968+++ b/net/netfilter/nf_conntrack_core.c
104969@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
104970 #define DYING_NULLS_VAL ((1<<30)+1)
104971 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
104972
104973+#ifdef CONFIG_GRKERNSEC_HIDESYM
104974+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
104975+#endif
104976+
104977 int nf_conntrack_init_net(struct net *net)
104978 {
104979 int ret = -ENOMEM;
104980@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
104981 if (!net->ct.stat)
104982 goto err_pcpu_lists;
104983
104984+#ifdef CONFIG_GRKERNSEC_HIDESYM
104985+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
104986+#else
104987 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
104988+#endif
104989 if (!net->ct.slabname)
104990 goto err_slabname;
104991
104992diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
104993index 4e78c57..ec8fb74 100644
104994--- a/net/netfilter/nf_conntrack_ecache.c
104995+++ b/net/netfilter/nf_conntrack_ecache.c
104996@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
104997 #ifdef CONFIG_SYSCTL
104998 static int nf_conntrack_event_init_sysctl(struct net *net)
104999 {
105000- struct ctl_table *table;
105001+ ctl_table_no_const *table;
105002
105003 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105004 GFP_KERNEL);
105005diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105006index bd9d315..989947e 100644
105007--- a/net/netfilter/nf_conntrack_helper.c
105008+++ b/net/netfilter/nf_conntrack_helper.c
105009@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105010
105011 static int nf_conntrack_helper_init_sysctl(struct net *net)
105012 {
105013- struct ctl_table *table;
105014+ ctl_table_no_const *table;
105015
105016 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105017 GFP_KERNEL);
105018diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105019index b65d586..beec902 100644
105020--- a/net/netfilter/nf_conntrack_proto.c
105021+++ b/net/netfilter/nf_conntrack_proto.c
105022@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105023
105024 static void
105025 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105026- struct ctl_table **table,
105027+ ctl_table_no_const **table,
105028 unsigned int users)
105029 {
105030 if (users > 0)
105031diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105032index fc823fa..8311af3 100644
105033--- a/net/netfilter/nf_conntrack_standalone.c
105034+++ b/net/netfilter/nf_conntrack_standalone.c
105035@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105036
105037 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105038 {
105039- struct ctl_table *table;
105040+ ctl_table_no_const *table;
105041
105042 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105043 GFP_KERNEL);
105044diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105045index 7a394df..bd91a8a 100644
105046--- a/net/netfilter/nf_conntrack_timestamp.c
105047+++ b/net/netfilter/nf_conntrack_timestamp.c
105048@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105049 #ifdef CONFIG_SYSCTL
105050 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105051 {
105052- struct ctl_table *table;
105053+ ctl_table_no_const *table;
105054
105055 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105056 GFP_KERNEL);
105057diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105058index 43c926c..a5731d8 100644
105059--- a/net/netfilter/nf_log.c
105060+++ b/net/netfilter/nf_log.c
105061@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
105062
105063 #ifdef CONFIG_SYSCTL
105064 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105065-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105066+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105067
105068 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105069 void __user *buffer, size_t *lenp, loff_t *ppos)
105070@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105071 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105072 mutex_unlock(&nf_log_mutex);
105073 } else {
105074+ ctl_table_no_const nf_log_table = *table;
105075+
105076 mutex_lock(&nf_log_mutex);
105077 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105078 if (!logger)
105079- table->data = "NONE";
105080+ nf_log_table.data = "NONE";
105081 else
105082- table->data = logger->name;
105083- r = proc_dostring(table, write, buffer, lenp, ppos);
105084+ nf_log_table.data = logger->name;
105085+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105086 mutex_unlock(&nf_log_mutex);
105087 }
105088
105089diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105090index c68c1e5..8b5d670 100644
105091--- a/net/netfilter/nf_sockopt.c
105092+++ b/net/netfilter/nf_sockopt.c
105093@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105094 }
105095 }
105096
105097- list_add(&reg->list, &nf_sockopts);
105098+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105099 out:
105100 mutex_unlock(&nf_sockopt_mutex);
105101 return ret;
105102@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105103 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105104 {
105105 mutex_lock(&nf_sockopt_mutex);
105106- list_del(&reg->list);
105107+ pax_list_del((struct list_head *)&reg->list);
105108 mutex_unlock(&nf_sockopt_mutex);
105109 }
105110 EXPORT_SYMBOL(nf_unregister_sockopt);
105111diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105112index 11d85b3..7fcc420 100644
105113--- a/net/netfilter/nfnetlink_log.c
105114+++ b/net/netfilter/nfnetlink_log.c
105115@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105116 struct nfnl_log_net {
105117 spinlock_t instances_lock;
105118 struct hlist_head instance_table[INSTANCE_BUCKETS];
105119- atomic_t global_seq;
105120+ atomic_unchecked_t global_seq;
105121 };
105122
105123 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105124@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105125 /* global sequence number */
105126 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105127 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105128- htonl(atomic_inc_return(&log->global_seq))))
105129+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105130 goto nla_put_failure;
105131
105132 if (data_len) {
105133diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105134new file mode 100644
105135index 0000000..c566332
105136--- /dev/null
105137+++ b/net/netfilter/xt_gradm.c
105138@@ -0,0 +1,51 @@
105139+/*
105140+ * gradm match for netfilter
105141